diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..ff9067195b05d4fc672ab02303bc584bdb09ddb6 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+PuzzleTuning/Counterpart[[:space:]]PreTrain[[:space:]]Methods/gcmae/nohup.out filter=lfs diff=lfs merge=lfs -text
diff --git a/PuzzleTuning/Archive/PuzzleTuning_demoset.zip b/PuzzleTuning/Archive/PuzzleTuning_demoset.zip
new file mode 100644
index 0000000000000000000000000000000000000000..0f68e9c28f797862af4c45e71ff446aa933aabf4
--- /dev/null
+++ b/PuzzleTuning/Archive/PuzzleTuning_demoset.zip
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:363cbdc5f8e944f99542a5727d4b2457dd79bbfca7eb70d76857de4b8be92858
+size 4987321
diff --git a/PuzzleTuning/Archive/build_CPIA_dataset.sh b/PuzzleTuning/Archive/build_CPIA_dataset.sh
new file mode 100644
index 0000000000000000000000000000000000000000..8705457a4af70f46a90bc8f30c961d904618b11b
--- /dev/null
+++ b/PuzzleTuning/Archive/build_CPIA_dataset.sh
@@ -0,0 +1,35 @@
+#!/bin/sh
+# run this script from the dataset download location
+
+# repair the split zip archives into single zip files
+
+zip -F L.zip --out L_Scale.zip
+zip -FF L_Scale.zip --out L.zip -fz
+zip -F M.zip --out M_Scale.zip
+zip -FF M_Scale.zip --out M.zip -fz
+
+rm -f L_Scale.zip
+rm -f L.z01
+rm -f M_Scale.zip
+rm -f M.z01
+rm -f M.z02
+
+# build a directory of datasets
+mkdir datasets
+mv L.zip datasets
+mv M.zip datasets
+mv S.zip datasets
+
+cd datasets
+unzip L.zip
+unzip M.zip
+unzip S.zip
+
+rm -f L.zip
+rm -f M.zip
+rm -f S.zip
+
+mkdir All
+cp -r L/* All/ &
+cp -r M/* All/ &
+cp -r S/* All/
\ No newline at end of file
diff --git a/PuzzleTuning/Archive/warwick_CLS.zip b/PuzzleTuning/Archive/warwick_CLS.zip
new file mode 100644
index 0000000000000000000000000000000000000000..7f15149809e24a4ad39828243f39bcda306fce0e
--- /dev/null
+++ b/PuzzleTuning/Archive/warwick_CLS.zip
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9813cdc86e2420476e06638965df6040cec0197b148be786f0bf88b020e445f6
+size 15098031
diff --git a/PuzzleTuning/Backbone/GetPromptModel.py b/PuzzleTuning/Backbone/GetPromptModel.py
new file mode 100644
index 0000000000000000000000000000000000000000..43f281b8b79494f6c2cb47a40ec8dcf9173f7bb1
--- /dev/null
+++ b/PuzzleTuning/Backbone/GetPromptModel.py
@@ -0,0 +1,87 @@
+"""
+build_promptmodel Script ver: Oct 17th 14:20
+
+"""
+
+try:
+    from Backbone.VPT_structure import *
+except ModuleNotFoundError:
+    from VPT_structure import *  # local-import fallback when run from inside Backbone/
+
+
+def build_promptmodel(num_classes=1000, edge_size=224, model_idx='ViT', patch_size=16,
+                      Prompt_Token_num=20, VPT_type="Deep", prompt_state_dict=None, base_state_dict='timm'):
+    """
+    following https://github.com/sagizty/VPT
+    this builds the VPT (prompt version of ViT) with additional prompt tokens;
+    at each layer the information becomes [B, N_patch + N_prompt, Dim]
+
+    During training only the prompt tokens and the head layer are
+    set to be learnable while the rest of the Transformer layers are frozen
+
+    # VPT_type = "Shallow" / "Deep"
+    - Shallow: concatenate N_prompt prompt tokens before the first Transformer Encoder block;
+    at each layer the information becomes [B, N_patch + N_prompt, Dim]
+    - Deep: concatenate N_prompt fresh prompt tokens at each Transformer Encoder block,
+    replacing the prompt tokens learnt from the previous encoder.
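+
+    Example (a minimal usage sketch, assuming timm can fetch the pretrained ViT weights):
+        model = build_promptmodel(num_classes=2, edge_size=224, patch_size=16,
+                                  Prompt_Token_num=20, VPT_type='Deep')
+        x = torch.randn(1, 3, 224, 224)
+        logits = model(x)  # torch.Size([1, 2])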
+    """
+
+    if model_idx[0:3] == 'ViT':
+
+        if base_state_dict is None:
+            basic_state_dict = None
+
+        elif isinstance(base_state_dict, str):
+            if base_state_dict == 'timm':
+                # ViT_Prompt
+                import timm
+                # from pprint import pprint
+                # model_names = timm.list_models('*vit*')
+                # pprint(model_names)
+
+                basic_model = timm.create_model('vit_base_patch' + str(patch_size) + '_' + str(edge_size),
+                                                pretrained=True)
+                basic_state_dict = basic_model.state_dict()
+                print('in prompt model building, timm ViT loaded for base_state_dict')
+
+            else:
+                basic_state_dict = None
+                print('in prompt model building, no valid str for base_state_dict')
+
+        else:  # state dict: collections.OrderedDict
+            basic_state_dict = base_state_dict
+            print('in prompt model building, a .pth base_state_dict loaded')
+
+        model = VPT_ViT(img_size=edge_size, patch_size=patch_size, Prompt_Token_num=Prompt_Token_num,
+                        VPT_type=VPT_type, basic_state_dict=basic_state_dict)
+
+        model.New_CLS_head(num_classes)
+
+        if prompt_state_dict is not None:
+            try:
+                model.load_prompt(prompt_state_dict)
+            except Exception:
+                print('error in loading the .pth prompt_state_dict')
+            else:
+                print('in prompt model building, a .pth prompt_state_dict loaded')
+
+        model.Freeze()
+    else:
+        print("The model is not defined in the Prompt script!!")
+        return -1
+
+    try:
+        img = torch.randn(1, 3, edge_size, edge_size)
+        preds = model(img)  # (1, class_number)
+        print('Build VPT model with in/out shape: ', img.shape, ' -> ', preds.shape)
+
+    except Exception:
+        print("Problem exists in the model defining process!!")
+        return -1
+    else:
+        print('model is ready now!')
+        return model
+
+
+if __name__ == '__main__':
+    model = build_promptmodel(prompt_state_dict=None, base_state_dict='timm', num_classes=0)
diff --git a/PuzzleTuning/Backbone/ResHybrid.py b/PuzzleTuning/Backbone/ResHybrid.py
new file mode 100644
index 0000000000000000000000000000000000000000..edbff2272721852f0f47ac6cc948c39b8ec9a344
--- /dev/null
+++ b/PuzzleTuning/Backbone/ResHybrid.py
@@ -0,0 +1,481 @@
+"""
+MSHT
+
+Models ver: OCT 27th 20:00 official release
+
+by the authors, check our github page:
+https://github.com/sagizty/Multi-Stage-Hybrid-Transformer
+
+
+ResNet stages' feature maps
+
+# input = 3, 384, 384
+torch.Size([1, 256, 96, 96])
+torch.Size([1, 512, 48, 48])
+torch.Size([1, 1024, 24, 24])
+torch.Size([1, 2048, 12, 12])
+torch.Size([1, 1000])
+
+# input = 3, 224, 224
+torch.Size([1, 256, 56, 56])
+torch.Size([1, 512, 28, 28])
+torch.Size([1, 1024, 14, 14])
+torch.Size([1, 2048, 7, 7])
+torch.Size([1, 1000])
+
+ref
+https://note.youdao.com/ynoteshare1/index.html?id=5a7dbe1a71713c317062ddeedd97d98e&type=note
+"""
+import torch
+from torch import nn
+from functools import partial
+from torchsummary import summary
+import os
+from Backbone import Transformer_blocks
+
+
+# ResNet Bottleneck_block_constructor
+class Bottleneck_block_constructor(nn.Module):
+
+    extention = 4
+
+    # define the layers and their parameters
+    def __init__(self, inplane, midplane, stride, downsample=None):
+        super(Bottleneck_block_constructor, self).__init__()
+
+        outplane = midplane * self.extention
+
+        self.conv1 = nn.Conv2d(inplane, midplane, kernel_size=1, stride=stride, bias=False)
+        self.bn1 = nn.BatchNorm2d(midplane)
+
+        self.conv2 = nn.Conv2d(midplane, midplane, kernel_size=3, stride=1, padding=1, bias=False)
+        self.bn2 = nn.BatchNorm2d(midplane)
+
+        self.conv3 = nn.Conv2d(midplane, outplane, kernel_size=1, stride=1, bias=False)
+        self.bn3 = nn.BatchNorm2d(outplane)
+
+        self.relu = nn.ReLU(inplace=False)
+
+        self.downsample = downsample
+        self.stride = 
stride + + def forward(self, x): + + out = self.relu(self.bn1(self.conv1(x))) + out = self.relu(self.bn2(self.conv2(out))) + out = self.relu(self.bn3(self.conv3(out))) + + if self.downsample is not None: + residual = self.downsample(x) + else: + residual = x + + out += residual + + out = self.relu(out) + + return out + + +# Hybrid_backbone of ResNets +class Hybrid_backbone_4(nn.Module): + + def __init__(self, block_constructor, bottleneck_channels_setting=None, identity_layers_setting=None, + stage_stride_setting=None, fc_num_classes=None, feature_idx=None): + + if bottleneck_channels_setting is None: + bottleneck_channels_setting = [64, 128, 256, 512] + if identity_layers_setting is None: + identity_layers_setting = [3, 4, 6, 3] + if stage_stride_setting is None: + stage_stride_setting = [1, 2, 2, 2] + + self.inplane = 64 + self.fc_num_classes = fc_num_classes + self.feature_idx = feature_idx + + super(Hybrid_backbone_4, self).__init__() + + self.block_constructor = block_constructor # Bottleneck_block_constructor + self.bcs = bottleneck_channels_setting # [64, 128, 256, 512] + self.ils = identity_layers_setting # [3, 4, 6, 3] + self.sss = stage_stride_setting # [1, 2, 2, 2] + + # stem + # alter the RGB pic chanel to match inplane + self.conv1 = nn.Conv2d(3, self.inplane, kernel_size=7, stride=2, padding=3, bias=False) + self.bn1 = nn.BatchNorm2d(self.inplane) + self.relu = nn.ReLU() + self.maxpool = nn.MaxPool2d(kernel_size=3, padding=1, stride=2) + + # ResNet stages + self.layer1 = self.make_stage_layer(self.block_constructor, self.bcs[0], self.ils[0], self.sss[0]) + self.layer2 = self.make_stage_layer(self.block_constructor, self.bcs[1], self.ils[1], self.sss[1]) + self.layer3 = self.make_stage_layer(self.block_constructor, self.bcs[2], self.ils[2], self.sss[2]) + self.layer4 = self.make_stage_layer(self.block_constructor, self.bcs[3], self.ils[3], self.sss[3]) + + # cls head + if self.fc_num_classes is not None: + self.avgpool = nn.AvgPool2d(7) + self.fc = nn.Linear(512 * self.block_constructor.extention, fc_num_classes) + + def forward(self, x): + + # stem + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + stem_out = self.maxpool(out) + + # Resnet block of 4 stages + stage1_out = self.layer1(stem_out) + stage2_out = self.layer2(stage1_out) + stage3_out = self.layer3(stage2_out) + stage4_out = self.layer4(stage3_out) + + if self.fc_num_classes is not None: + # connect to cls head mlp if asked + fc_out = self.avgpool(stage4_out) + fc_out = torch.flatten(fc_out, 1) + fc_out = self.fc(fc_out) + + # get what we need for different usage + if self.feature_idx == 'stages': + if self.fc_num_classes is not None: + return stage1_out, stage2_out, stage3_out, stage4_out, fc_out + else: + return stage1_out, stage2_out, stage3_out, stage4_out + elif self.feature_idx == 'features': + if self.fc_num_classes is not None: + return stem_out, stage1_out, stage2_out, stage3_out, stage4_out, fc_out + else: + return stem_out, stage1_out, stage2_out, stage3_out, stage4_out + else: # self.feature_idx is None + if self.fc_num_classes is not None: + return fc_out + else: + return stage4_out + + def make_stage_layer(self, block_constractor, midplane, block_num, stride=1): + """ + block: + midplane:usually = output chanel/4 + block_num: + stride:stride of the ResNet Conv Block + """ + + block_list = [] + + outplane = midplane * block_constractor.extention # extention + + if stride != 1 or self.inplane != outplane: + downsample = nn.Sequential( + nn.Conv2d(self.inplane, outplane, stride=stride, 
kernel_size=1, bias=False), + nn.BatchNorm2d(midplane * block_constractor.extention) + ) + else: + downsample = None + + # Conv Block + conv_block = block_constractor(self.inplane, midplane, stride=stride, downsample=downsample) + block_list.append(conv_block) + + self.inplane = outplane # update inplane for the next stage + + # Identity Block + for i in range(1, block_num): + block_list.append(block_constractor(self.inplane, midplane, stride=1, downsample=None)) + + return nn.Sequential(*block_list) # stack blocks + + +class Hybrid_backbone_3(nn.Module): # 3 stages version + + def __init__(self, block_constructor, bottleneck_channels_setting=None, identity_layers_setting=None, + stage_stride_setting=None, fc_num_classes=None, feature_idx=None): + + if bottleneck_channels_setting is None: + bottleneck_channels_setting = [64, 128, 256] + if identity_layers_setting is None: + identity_layers_setting = [3, 4, 6] + if stage_stride_setting is None: + stage_stride_setting = [1, 2, 2] + + self.inplane = 64 + self.fc_num_classes = fc_num_classes + self.feature_idx = feature_idx + + super(Hybrid_backbone_3, self).__init__() + + self.block_constructor = block_constructor # Bottleneck_block_constructor + self.bcs = bottleneck_channels_setting # [64, 128, 256] + self.ils = identity_layers_setting # [3, 4, 6] + self.sss = stage_stride_setting # [1, 2, 2] + + # stem + self.conv1 = nn.Conv2d(3, self.inplane, kernel_size=7, stride=2, padding=3, bias=False) + self.bn1 = nn.BatchNorm2d(self.inplane) + self.relu = nn.ReLU() + self.maxpool = nn.MaxPool2d(kernel_size=3, padding=1, stride=2) + + # ResNet 3 stages + self.layer1 = self.make_stage_layer(self.block_constructor, self.bcs[0], self.ils[0], self.sss[0]) + self.layer2 = self.make_stage_layer(self.block_constructor, self.bcs[1], self.ils[1], self.sss[1]) + self.layer3 = self.make_stage_layer(self.block_constructor, self.bcs[2], self.ils[2], self.sss[2]) + + if self.fc_num_classes is not None: + self.avgpool = nn.AvgPool2d(24) # 224-14 384-24 + self.fc = nn.Linear(self.bcs[-1] * self.block_constructor.extention, fc_num_classes) + + def forward(self, x): + # stem:conv+bn+relu+maxpool + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + stem_out = self.maxpool(out) + + # Resnet 3 stages + stage1_out = self.layer1(stem_out) + stage2_out = self.layer2(stage1_out) + stage3_out = self.layer3(stage2_out) + + if self.fc_num_classes is not None: + fc_out = self.avgpool(stage3_out) + fc_out = torch.flatten(fc_out, 1) + fc_out = self.fc(fc_out) + + if self.feature_idx == 'stages': + if self.fc_num_classes is not None: + return stage1_out, stage2_out, stage3_out, fc_out + else: + return stage1_out, stage2_out, stage3_out + elif self.feature_idx == 'features': + if self.fc_num_classes is not None: + return stem_out, stage1_out, stage2_out, stage3_out, fc_out + else: + return stem_out, stage1_out, stage2_out, stage3_out + else: # self.feature_idx is None + if self.fc_num_classes is not None: + return fc_out + else: + return stage3_out + + def make_stage_layer(self, block_constractor, midplane, block_num, stride=1): + """ + block: + midplane: + block_num: + stride: + """ + + block_list = [] + + outplane = midplane * block_constractor.extention # extention + + if stride != 1 or self.inplane != outplane: + downsample = nn.Sequential( + nn.Conv2d(self.inplane, outplane, stride=stride, kernel_size=1, bias=False), + nn.BatchNorm2d(midplane * block_constractor.extention) + ) + else: + downsample = None + + # Conv Block + conv_block = 
block_constractor(self.inplane, midplane, stride=stride, downsample=downsample)
+        block_list.append(conv_block)
+
+        self.inplane = outplane
+
+        # Identity Block
+        for i in range(1, block_num):
+            block_list.append(block_constractor(self.inplane, midplane, stride=1, downsample=None))
+
+        return nn.Sequential(*block_list)
+
+
+def Hybrid_a(backbone, img_size=224, patch_size=1, in_chans=3, num_classes=1000, embed_dim=768, depth=8,
+             num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, drop_rate=0., attn_drop_rate=0.,
+             drop_path_rate=0., norm_layer=None, act_layer=None):
+    # directly stack CNNs and Transformer blocks
+    embed_layer = partial(Transformer_blocks.Hybrid_feature_map_Embed, backbone=backbone)
+
+    Hybrid_model = Transformer_blocks.VisionTransformer(img_size, patch_size, in_chans, num_classes, embed_dim, depth,
+                                                        num_heads, mlp_ratio, qkv_bias, representation_size,
+                                                        drop_rate, attn_drop_rate, drop_path_rate, embed_layer,
+                                                        norm_layer, act_layer)
+
+    return Hybrid_model
+
+
+def create_model(model_idx, edge_size, pretrained=True, num_classes=2, drop_rate=0., attn_drop_rate=0.,
+                 drop_path_rate=0., use_cls_token=True, use_pos_embedding=True, use_att_module='SimAM'):
+    """
+    get one of the MSHT models
+
+    :param model_idx: the model to use, in the format Model_size_other_info
+    :param edge_size: the input edge size of the dataloader
+    :param pretrained: whether the backbone CNN is loaded from its official pretrained weights (True)
+                       or initialized randomly (False)
+    :param num_classes: number of classes in your classification dataset
+
+    :param drop_rate: dropout probability of the proposed models
+    :param attn_drop_rate: dropout probability right after the MHSA or MHGA block of the proposed models
+    :param drop_path_rate: probability of stochastic depth
+
+    :param use_cls_token: whether to use the class token
+    :param use_pos_embedding: whether to use the positional embedding
+    :param use_att_module: which attention module to use in the FGD Focus block
+    # use_att_module in ['SimAM', 'CBAM', 'SE']: the attention modules applied in the ablation study
+
+    :return: prepared model
+    """
+
+    if pretrained:
+        from torchvision import models
+        backbone_weights = models.resnet50(pretrained=True).state_dict()
+        # True loads the pretrained ResNet50 weights, False initializes the backbone randomly
+    else:
+        backbone_weights = None
+
+    if model_idx[0:11] == 'Hybrid1_224' and edge_size == 224:  # ablation study: no focus depth=8 edge_size == 224
+        backbone = Hybrid_backbone_4(block_constructor=Bottleneck_block_constructor,
+                                     bottleneck_channels_setting=[64, 128, 256, 512],
+                                     identity_layers_setting=[3, 4, 6, 3],
+                                     stage_stride_setting=[1, 2, 2, 2],
+                                     fc_num_classes=None,
+                                     feature_idx=None)
+
+        if pretrained:
+            try:
+                backbone.load_state_dict(backbone_weights, False)
+            except:
+                print("backbone not loaded")
+            else:
+                print("backbone loaded")
+
+        model = Hybrid_a(backbone, img_size=edge_size, patch_size=1, in_chans=3, num_classes=num_classes, embed_dim=768,
+                         depth=8, num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None,
+                         drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_rate=drop_path_rate,
+                         norm_layer=None, act_layer=None)
+
+    elif model_idx[0:11] == 'Hybrid1_384' and edge_size == 384:  # ablation study: no focus depth=8 edge_size == 384
+        backbone = Hybrid_backbone_4(block_constructor=Bottleneck_block_constructor,
+                                     bottleneck_channels_setting=[64, 128, 256, 512],
+                                     identity_layers_setting=[3, 4, 6, 3],
+                                     stage_stride_setting=[1, 2, 2, 2],
+                                     fc_num_classes=None,
+                                     feature_idx=None)
+
+        if 
pretrained: + try: + backbone.load_state_dict(backbone_weights, False) + except: + print("backbone not loaded") + else: + print("backbone loaded") + + model = Hybrid_a(backbone, img_size=edge_size, patch_size=1, in_chans=3, num_classes=num_classes, embed_dim=768, + depth=8, num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, + drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_rate=drop_path_rate, + norm_layer=None, act_layer=None) + + elif model_idx[0:11] == 'Hybrid2_224' and edge_size == 224: # Proposed model ablation study: edge_size==224 + backbone = Hybrid_backbone_4(block_constructor=Bottleneck_block_constructor, + bottleneck_channels_setting=[64, 128, 256, 512], + identity_layers_setting=[3, 4, 6, 3], + stage_stride_setting=[1, 2, 2, 2], + fc_num_classes=None, + feature_idx='stages') + if pretrained: + try: + backbone.load_state_dict(backbone_weights, False) + except: + print("backbone not loaded") + else: + print("backbone loaded") + + model = Transformer_blocks.Stage_wise_hybrid_Transformer(backbone, num_classes=num_classes, + drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, + drop_path_rate=drop_path_rate, + use_cls_token=use_cls_token, + use_pos_embedding=use_pos_embedding, + use_att_module=use_att_module, + stage_size=(56, 28, 14, 7), + stage_dim=[256, 512, 1024, 2048]) + + elif model_idx[0:11] == 'Hybrid2_384' and edge_size == 384: # Proposed model 384 !!! + backbone = Hybrid_backbone_4(block_constructor=Bottleneck_block_constructor, + bottleneck_channels_setting=[64, 128, 256, 512], + identity_layers_setting=[3, 4, 6, 3], + stage_stride_setting=[1, 2, 2, 2], + fc_num_classes=None, + feature_idx='stages') + if pretrained: + try: + backbone.load_state_dict(backbone_weights, False) + except: + print("backbone not loaded") + else: + print("backbone loaded") + + model = Transformer_blocks.Stage_wise_hybrid_Transformer(backbone, num_classes=num_classes, + drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, + drop_path_rate=drop_path_rate, + use_cls_token=use_cls_token, + use_pos_embedding=use_pos_embedding, + use_att_module=use_att_module, + stage_size=(96, 48, 24, 12), + stage_dim=[256, 512, 1024, 2048]) + + elif model_idx[0:11] == 'Hybrid3_224' and edge_size == 224: # Proposed model ablation study: edge_size==224 + backbone = Hybrid_backbone_3(block_constructor=Bottleneck_block_constructor, + bottleneck_channels_setting=[64, 128, 256], + identity_layers_setting=[3, 4, 6], + stage_stride_setting=[1, 2, 2], + fc_num_classes=None, + feature_idx='stages') + if pretrained: + try: + backbone.load_state_dict(backbone_weights, False) + except: + print("backbone not loaded") + else: + print("backbone loaded") + + model = Transformer_blocks.Stage_wise_hybrid_Transformer(backbone, num_classes=num_classes, + drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, + drop_path_rate=drop_path_rate, + use_cls_token=use_cls_token, + use_pos_embedding=use_pos_embedding, + use_att_module=use_att_module, + stage_size=(56, 28, 14), + stage_dim=[256, 512, 1024]) + + elif model_idx[0:11] == 'Hybrid3_384' and edge_size == 384: # Proposed model 384 !!! 
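+        # A minimal usage sketch (illustrative values, not from the paper): every
+        # 'HybridX_384' model index must be paired with edge_size=384, e.g.
+        #   model = create_model('Hybrid3_384', edge_size=384, pretrained=True, num_classes=2)
+        #   logits = model(torch.randn(1, 3, 384, 384))  # expected shape: (1, 2)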
+ backbone = Hybrid_backbone_3(block_constructor=Bottleneck_block_constructor, + bottleneck_channels_setting=[64, 128, 256], + identity_layers_setting=[3, 4, 6], + stage_stride_setting=[1, 2, 2], + fc_num_classes=None, + feature_idx='stages') + if pretrained: + try: + backbone.load_state_dict(backbone_weights, False) + except: + print("backbone not loaded") + else: + print("backbone loaded") + + model = Transformer_blocks.Stage_wise_hybrid_Transformer(backbone, num_classes=num_classes, + drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, + drop_path_rate=drop_path_rate, + use_cls_token=use_cls_token, + use_pos_embedding=use_pos_embedding, + use_att_module=use_att_module, + stage_size=(96, 48, 24), + stage_dim=[256, 512, 1024]) + + else: + print('not a valid hybrid model') + return -1 + + return model diff --git a/PuzzleTuning/Backbone/Transformer_blocks.py b/PuzzleTuning/Backbone/Transformer_blocks.py new file mode 100644 index 0000000000000000000000000000000000000000..ff1bcf9965b10a3df635349b217d3c4590f1fa48 --- /dev/null +++ b/PuzzleTuning/Backbone/Transformer_blocks.py @@ -0,0 +1,1631 @@ +""" +Transformer blocks script ver: OCT 28th 15:00 + +bug fix: 'Cross-attn' name is used in MHGA for compareability + +by the authors, check our github page: +https://github.com/sagizty/Multi-Stage-Hybrid-Transformer + +based on:timm +https://www.freeaihub.com/post/94067.html + +""" + +import math +import logging +from functools import partial +from collections import OrderedDict + +import torch +import torch.nn as nn +import torch.nn.functional as F + +import numpy as np + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD + +from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ + +from .attention_modules import simam_module, cbam_module, se_module + + +class FFN(nn.Module): # Mlp from timm + """ + FFN (from timm) + + :param in_features: + :param hidden_features: + :param out_features: + :param act_layer: + :param drop: + """ + + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + + out_features = out_features or in_features + hidden_features = hidden_features or in_features + + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + + x = self.fc2(x) + x = self.drop(x) + + return x + + +class Attention(nn.Module): # qkv Transform + MSA(MHSA) (Attention from timm) + """ + qkv Transform + MSA(MHSA) (from timm) + + # input x.shape = batch, patch_number, patch_dim + # output x.shape = batch, patch_number, patch_dim + + :param dim: dim=CNN feature dim, because the patch size is 1x1 + :param num_heads: + :param qkv_bias: + :param qk_scale: by default head_dim ** -0.5 (squre root) + :param attn_drop: dropout rate after MHSA + :param proj_drop: + + """ + + def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights + self.scale = qk_scale or head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + # input x.shape = batch, 
patch_number, patch_dim + batch, patch_number, patch_dim = x.shape + + # mlp transform + head split [N, P, D] -> [N, P, 3D] -> [N, P, 3, H, D/H] -> [3, N, H, P, D/H] + qkv = self.qkv(x).reshape(batch, patch_number, 3, self.num_heads, patch_dim // + self.num_heads).permute(2, 0, 3, 1, 4) + # 3 [N, H, P, D/H] + q, k, v = qkv[0], qkv[1], qkv[2] + + # [N, H, P, D/H] -> [N, H, P, D/H] + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + + attn = self.attn_drop(attn) # Dropout + + # head fusion [N, H, P, D/H] -> [N, P, H, D/H] -> [N, P, D] + x = (attn @ v).transpose(1, 2).reshape(batch, patch_number, patch_dim) + + x = self.proj(x) + x = self.proj_drop(x) # mlp + + # output x.shape = batch, patch_number, patch_dim + return x + + +class Encoder_Block(nn.Module): # teansformer Block from timm + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): + """ + # input x.shape = batch, patch_number, patch_dim + # output x.shape = batch, patch_number, patch_dim + + :param dim: dim + :param num_heads: + :param mlp_ratio: FFN + :param qkv_bias: + :param qk_scale: by default head_dim ** -0.5 (squre root) + :param drop: + :param attn_drop: dropout rate after Attention + :param drop_path: dropout rate after sd + :param act_layer: FFN act + :param norm_layer: Pre Norm + """ + super().__init__() + # Pre Norm + self.norm1 = norm_layer(dim) # Transformer used the nn.LayerNorm + self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, + proj_drop=drop) + # NOTE from timm: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() # stochastic depth + + # Add & Norm + self.norm2 = norm_layer(dim) + + # FFN + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = FFN(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x): + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class Guided_Attention(nn.Module): # q1 k1 v0 Transform + MSA(MHSA) (based on timm Attention) + """ + notice the q abd k is guided information from Focus module + qkv Transform + MSA(MHSA) (from timm) + + # 3 input of x.shape = batch, patch_number, patch_dim + # 1 output of x.shape = batch, patch_number, patch_dim + + :param dim: dim = CNN feature dim, because the patch size is 1x1 + :param num_heads: + :param qkv_bias: + :param qk_scale: by default head_dim ** -0.5 (squre root) + :param attn_drop: + :param proj_drop: + + """ + + def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim ** -0.5 + + self.qT = nn.Linear(dim, dim, bias=qkv_bias) + self.kT = nn.Linear(dim, dim, bias=qkv_bias) + self.vT = nn.Linear(dim, dim, bias=qkv_bias) + + self.attn_drop = nn.Dropout(attn_drop) + + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, q_encoder, k_encoder, v_input): + # 3 input of x.shape = batch, patch_number, patch_dim + batch, patch_number, patch_dim = v_input.shape + + q = self.qT(q_encoder).reshape(batch, patch_number, 1, self.num_heads, + patch_dim // self.num_heads).permute(2, 0, 3, 1, 4) + k = self.kT(k_encoder).reshape(batch, patch_number, 1, self.num_heads, + patch_dim // self.num_heads).permute(2, 0, 3, 1, 4) + v = self.vT(v_input).reshape(batch, patch_number, 1, self.num_heads, + patch_dim // self.num_heads).permute(2, 0, 3, 1, 4) + q = q[0] + k = k[0] + v = v[0] + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + + attn = self.attn_drop(attn) # Dropout + + x = (attn @ v).transpose(1, 2).reshape(batch, patch_number, patch_dim) + + x = self.proj(x) + x = self.proj_drop(x) # mlp Dropout + + # output of x.shape = batch, patch_number, patch_dim + return x + + +class Decoder_Block(nn.Module): + # FGD Decoder (Transformer encoder + Guided Attention block block) + def __init__(self, dim, num_heads=8, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): + """ + # input x.shape = batch, patch_number, patch_dim + # output x.shape = batch, patch_number, patch_dim + + :param dim: dim=CNN feature dim, because the patch size is 1x1 + :param num_heads: multi-head + :param mlp_ratio: FFN expand ratio + :param qkv_bias: qkv MLP bias + :param qk_scale: by default head_dim ** -0.5 (squre root) + :param drop: the MLP after MHSA equipt a dropout rate + :param attn_drop: dropout rate after attention block + :param drop_path: dropout rate for stochastic depth + :param act_layer: FFN act + :param norm_layer: Pre Norm strategy with norm layer + """ + super().__init__() + # Pre Norm + self.norm0 = norm_layer(dim) # nn.LayerNorm + self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, + proj_drop=drop) + # stochastic depth + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + # Pre Norm + self.norm1 = norm_layer(dim) + + # FFN1 + mlp_hidden_dim = int(dim * mlp_ratio) + self.FFN1 = FFN(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + # Guided_Attention + self.Cross_attn = Guided_Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, + attn_drop=attn_drop, proj_drop=drop) + + # Add & Norm + self.norm2 = norm_layer(dim) + # FFN2 + self.FFN2 = FFN(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + # Add & Norm + self.norm3 = norm_layer(dim) + + def forward(self, q_encoder, k_encoder, v_input): + v_self = v_input + self.drop_path(self.attn(self.norm0(v_input))) + + v_self = v_self + self.drop_path(self.FFN1(self.norm1(v_self))) + + # norm layer for v only, the normalization of q and k is inside FGD Focus block + v_self = v_self + self.drop_path(self.Cross_attn(q_encoder, k_encoder, self.norm2(v_self))) + + v_self = v_self + self.drop_path(self.FFN2(self.norm3(v_self))) + + return v_self + + +''' +# testing example + +model=Decoder_Block(dim=768) +k = torch.randn(7, 49, 768) +q = torch.randn(7, 49, 768) +v = torch.randn(7, 49, 768) +x = model(k,q,v) +print(x.shape) +''' + + +# MViT modules +# from https://github.com/facebookresearch/SlowFast/slowfast/models/attention.py +def attention_pool(tensor, pool, thw_shape, has_cls_embed=True, norm=None): + """ + attention pooling constructor + + input: + tensor of (B, Head, N, C) or (B, N, C) + thw_shape: T, H, W 对应CNN的特征图形状(2D形状)T is video frams + + numpy.prob(T, H, W) == N(Num_patches) - 1 (cls token if it is there) + + output: + tensor of (B, Head, N_O, C) or (B, N_O, C) + thw_shape: T_O, H_O, W_O + + :param tensor: input feature patches + :param pool: pooling/conv layer + :param thw_shape: reconstruction feature map shape + :param has_cls_embed: if cls token is used + :param norm: norm layer + + """ + if pool is None: # no pool + return tensor, thw_shape + + tensor_dim = tensor.ndim + + # fix dim: [B, Head, N, C] + # N is Num_patches in Transformer modeling + + if tensor_dim == 4: + pass + elif tensor_dim == 3: # [B, N, C] -> [B, Head(1), N, C] + tensor = tensor.unsqueeze(1) + else: + raise NotImplementedError(f"Unsupported input dimension {tensor.shape}") + + if has_cls_embed: + cls_tok, tensor = tensor[:, :, :1, :], tensor[:, :, 1:, :] + + B, Head, N, C = tensor.shape + T, H, W = thw_shape # numpy.prob(T, H, W) == N(Num_patches) - 1 (cls token if it is there) + + # [B, Head, N, C] -> [B * Head, T, H, W, C] -> [B * Head, C, T, H, W] + tensor = (tensor.reshape(B * Head, T, H, W, C).permute(0, 4, 1, 2, 3).contiguous()) + # use tensor.contiguous() to matain its memory location + + # [B * Head, C, T, H, W] -> [B * Head, C, T_O, H_O, W_O] + tensor = pool(tensor) # 3D Pooling/ 3D Conv + + # output T, H, W + thw_shape = [tensor.shape[2], tensor.shape[3], tensor.shape[4]] + # output Num_patches: numpy.prob(T, H, W) + N_pooled = tensor.shape[2] * tensor.shape[3] * tensor.shape[4] + + # [B * Head, C, T_O, H_O, W_O] -> [B, Head, C, N_O(T_O*H_O*W_O)] -> [B, Head, N_O, C] + tensor = tensor.reshape(B, Head, C, N_pooled).transpose(2, 3) + + if has_cls_embed: + # [B, Head, N_O, C] -> [B, Head, N_O+1(cls token), C] + tensor = torch.cat((cls_tok, tensor), dim=2) + + # norm + if norm is not None: + tensor = norm(tensor) + + # Assert tensor_dim in [3, 4] + if tensor_dim == 4: # [B, Head, N_O, C] multi-head + pass + else: # tensor_dim == 3: this is a single Head + tensor = tensor.squeeze(1) # [B, N_O, C] + + return tensor, 
thw_shape + + +''' +# case 1 single-head no pooling scale +x = torch.randn(1, 197, 768) +thw_shape = [1, 14, 14] +pool = nn.MaxPool3d((1, 1, 1), (1, 1, 1), (0, 0, 0), ceil_mode=False) +y, thw = attention_pool(x, pool, thw_shape) + +print(y.shape) # torch.Size([1, 197, 768]) +print(thw) # [1, 14, 14] + + +# case 2 multi-head no pooling scale +x = torch.randn(1, 8, 197, 96) # [B, Head, N_O, C] multi-head +thw_shape = [1, 14, 14] +pool = nn.MaxPool3d((1, 1, 1), (1, 1, 1), (0, 0, 0), ceil_mode=False) +y, thw = attention_pool(x, pool, thw_shape) + +print(y.shape) # torch.Size([1, 8, 197, 96]) +print(thw) # [1, 14, 14] + + +# case 3 pooling scale +x = torch.randn(1, 197, 768) +thw_shape = [1, 14, 14] +pool = nn.MaxPool3d((1, 2, 2), (1, 2, 2), (0, 0, 0), ceil_mode=False) +y, thw = attention_pool(x, pool, thw_shape) + +print(y.shape) # torch.Size([1, 50, 768]) +print(thw) # [1, 7, 7] + + +# case 4 multi-head pooling scale +x = torch.randn(1, 8, 197, 96) # [B, Head, N_O, C] multi-head +thw_shape = [1, 14, 14] +pool = nn.MaxPool3d((1, 2, 2), (1, 2, 2), (0, 0, 0), ceil_mode=False) +y, thw = attention_pool(x, pool, thw_shape) + +print(y.shape) # torch.Size([1, 8, 50, 96]) +print(thw) # [1, 7, 7] +''' + + +class MultiScaleAttention(nn.Module): # Attention module + """ + Attention module constructor + + input: + tensor of (B, N, C) + thw_shape: T, H, W 对应CNN的特征图形状(2D形状)T is video frams + + numpy.prob(T, H, W) == N(Num_patches) - 1 (cls token if it is there) + + output: + tensor of (B, N_O, C) + thw_shape: T_O, H_O, W_O + + :param dim: Transformer feature dim + :param num_heads: Transformer heads + :param qkv_bias: projecting bias + :param drop_rate: dropout rate after attention calculation and mlp + + :param kernel_q: pooling kernal size for q + :param kernel_kv: pooling kernal size for k and v + :param stride_q: pooling kernal stride for q + :param stride_kv: pooling kernal stride for k and v + + :param norm_layer: norm layer + :param has_cls_embed: if cls token is used + :param mode: mode for attention pooling(downsampling) Options include `conv`, `avg`, and `max`. + :param pool_first: process pooling(downsampling) before liner projecting + + """ + + def __init__( + self, + dim, + num_heads=8, + qkv_bias=False, + drop_rate=0.0, + kernel_q=(1, 1, 1), + kernel_kv=(1, 1, 1), + stride_q=(1, 1, 1), + stride_kv=(1, 1, 1), + norm_layer=nn.LayerNorm, + has_cls_embed=True, + # Options include `conv`, `avg`, and `max`. + mode="conv", + # If True, perform pool before projection. + pool_first=False, + ): + super().__init__() + + self.pool_first = pool_first + self.drop_rate = drop_rate + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 # squre root + self.has_cls_embed = has_cls_embed + + padding_q = [int(q // 2) for q in kernel_q] # 以半个kernal size进行padding,向下取整 + padding_kv = [int(kv // 2) for kv in kernel_kv] + + # projecting mlp + self.q = nn.Linear(dim, dim, bias=qkv_bias) + self.k = nn.Linear(dim, dim, bias=qkv_bias) + self.v = nn.Linear(dim, dim, bias=qkv_bias) + self.proj = nn.Linear(dim, dim) + + if drop_rate > 0.0: + self.proj_drop = nn.Dropout(drop_rate) + + # Skip pooling with kernel and stride size of (1, 1, 1). 
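+        # e.g. the default kernel_q=(1, 1, 1), stride_q=(1, 1, 1) gives np.prod(...) == 1,
+        # so the kernel tuple is cleared and the matching pool layer below becomes None
+        # (attention_pool then returns its input unchanged); with pooling enabled, as in
+        # the commented case 2 below, (1, 197, 768) with thw [1, 14, 14] pools to (1, 65, 768)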
+ if np.prod(kernel_q) == 1 and np.prod(stride_q) == 1: + kernel_q = () # clear + if np.prod(kernel_kv) == 1 and np.prod(stride_kv) == 1: + kernel_kv = () + + if mode in ("avg", "max"): # use nn.MaxPool3d or nn.AvgPool3d + pool_op = nn.MaxPool3d if mode == "max" else nn.AvgPool3d + self.pool_q = ( + pool_op(kernel_q, stride_q, padding_q, ceil_mode=False) + if len(kernel_q) > 0 + else None # Skip pooling if kernel is cleared + ) + self.pool_k = ( + pool_op(kernel_kv, stride_kv, padding_kv, ceil_mode=False) + if len(kernel_kv) > 0 + else None + ) + self.pool_v = ( + pool_op(kernel_kv, stride_kv, padding_kv, ceil_mode=False) + if len(kernel_kv) > 0 + else None + ) + + elif mode == "conv": # use nn.Conv3d with depth wise conv and fixed channel setting + self.pool_q = ( + nn.Conv3d( + head_dim, + head_dim, + kernel_q, + stride=stride_q, + padding=padding_q, + groups=head_dim, + bias=False, + ) + if len(kernel_q) > 0 + else None + ) + self.norm_q = norm_layer(head_dim) if len(kernel_q) > 0 else None + + self.pool_k = ( + nn.Conv3d( + head_dim, + head_dim, + kernel_kv, + stride=stride_kv, + padding=padding_kv, + groups=head_dim, + bias=False, + ) + if len(kernel_kv) > 0 + else None + ) + self.norm_k = norm_layer(head_dim) if len(kernel_kv) > 0 else None + + self.pool_v = ( + nn.Conv3d( + head_dim, + head_dim, + kernel_kv, + stride=stride_kv, + padding=padding_kv, + groups=head_dim, + bias=False, + ) + if len(kernel_kv) > 0 + else None + ) + self.norm_v = norm_layer(head_dim) if len(kernel_kv) > 0 else None + else: + raise NotImplementedError(f"Unsupported model {mode}") + + def forward(self, x, thw_shape): + """ + x: Transformer feature patches + thw_shape: reconstruction feature map shape + """ + + B, N, C = x.shape + + # step 1: duplicate projecting + head split: [B, N, C] -> [B, H, N, C/H] + + if self.pool_first: # step a.1 embedding + # head split [B, N, C] -> [B, N, H, C/H] -> [B, H, N, C/H] + x = x.reshape(B, N, self.num_heads, C // self.num_heads).permute( + 0, 2, 1, 3 + ) + q = k = v = x + + else: # step b.1 projecting first + # mlp transform + head split: [B, N, C] -> [B, N, H, C/H] -> [B, H, N, C/H] + # todo 这里我觉得可能共享mlp映射更好,能有更好的交互,但是分离mlp更节约计算量 + q = k = v = x + q = ( + self.q(q) + .reshape(B, N, self.num_heads, C // self.num_heads) + .permute(0, 2, 1, 3) + ) + k = ( + self.k(k) + .reshape(B, N, self.num_heads, C // self.num_heads) + .permute(0, 2, 1, 3) + ) + v = ( + self.v(v) + .reshape(B, N, self.num_heads, C // self.num_heads) + .permute(0, 2, 1, 3) + ) + + # step 2: calculate attention_pool feature sequence and its shape + # [B, H, N0, C/H] -> [B, H, N1, C/H] + q, q_shape = attention_pool( + q, + self.pool_q, + thw_shape, + has_cls_embed=self.has_cls_embed, + norm=self.norm_q if hasattr(self, "norm_q") else None, + ) + k, k_shape = attention_pool( + k, + self.pool_k, + thw_shape, + has_cls_embed=self.has_cls_embed, + norm=self.norm_k if hasattr(self, "norm_k") else None, + ) + v, v_shape = attention_pool( + v, + self.pool_v, + thw_shape, + has_cls_embed=self.has_cls_embed, + norm=self.norm_v if hasattr(self, "norm_v") else None, + ) + + if self.pool_first: # step a.3 MLP projecting + # calculate patch number, q_N, k_N, v_N + q_N = ( + np.prod(q_shape) + 1 + if self.has_cls_embed + else np.prod(q_shape) + ) + k_N = ( + np.prod(k_shape) + 1 + if self.has_cls_embed + else np.prod(k_shape) + ) + v_N = ( + np.prod(v_shape) + 1 + if self.has_cls_embed + else np.prod(v_shape) + ) + + # [B, H, N1, C/H] -> [B, N1, H, C/H] -> [B, N1, C] -> MLP + # -> [B, N1, C] -> [B, N1, H, C/H] -> [B, H, 
N1, C/H]
+            q = q.permute(0, 2, 1, 3).reshape(B, q_N, C)
+            q = (
+                self.q(q)
+                .reshape(B, q_N, self.num_heads, C // self.num_heads)
+                .permute(0, 2, 1, 3)
+            )
+
+            v = v.permute(0, 2, 1, 3).reshape(B, v_N, C)
+            v = (
+                self.v(v)
+                .reshape(B, v_N, self.num_heads, C // self.num_heads)
+                .permute(0, 2, 1, 3)
+            )
+
+            k = k.permute(0, 2, 1, 3).reshape(B, k_N, C)
+            k = (
+                self.k(k)
+                .reshape(B, k_N, self.num_heads, C // self.num_heads)
+                .permute(0, 2, 1, 3)
+            )
+
+        # step 3: attention calculation
+        # multi-head self attention [B, H, N1, C/H] -> [B, H, N1, C/H]
+        attn = (q @ k.transpose(-2, -1)) * self.scale
+        attn = attn.softmax(dim=-1)
+
+        # head squeeze [B, H, N1, C/H] -> [B, N1, H, C/H] -> [B, N1, C]
+        N = q.shape[2]
+        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
+
+        # step 4: mlp stabilization and dropout [B, N1, C] -> [B, N1, C]
+        x = self.proj(x)
+        if self.drop_rate > 0.0:
+            x = self.proj_drop(x)
+
+        return x, q_shape
+
+
+'''
+# case 1
+model = MultiScaleAttention(768)
+x = torch.randn(1, 197, 768)
+y, thw = model(x, [1, 14, 14])
+print(y.shape)
+
+
+# case 2
+kernel_q = (1, 2, 2)
+kernel_kv = (1, 2, 2)
+stride_q = (1, 2, 2)
+stride_kv = (1, 2, 2)
+# MultiScaleAttention pads with half the kernel size, rounded down
+
+model = MultiScaleAttention(768, kernel_q=kernel_q, kernel_kv=kernel_kv, stride_q=stride_q, stride_kv=stride_kv)
+x = torch.randn(1, 197, 768)
+y, thw = model(x, [1, 14, 14])
+
+print(y.shape)  # prints torch.Size([1, 65, 768]): 7*7 without padding, 8*8 with padding, plus the cls token
+'''
+
+
+class MultiScaleBlock(nn.Module):  # MViT Encoder
+    """
+    MViT encoder block constructor
+
+    input:
+        tensor of (B, N, C)
+        thw_shape: T, H, W, the matching CNN feature-map shape (2D shape); T is the number of video frames
+
+        numpy.prod(T, H, W) == N(Num_patches) - 1 (cls token if it is there)
+
+    output:
+        tensor of (B, N_O, C)
+        thw_shape: T_O, H_O, W_O
+
+    :param dim: Transformer feature dim
+    :param dim_out: output Transformer feature dim
+
+    :param num_heads: Transformer heads
+    :param mlp_ratio: FFN hidden expansion
+    :param qkv_bias: projecting bias
+    :param drop_rate: dropout rate after attention calculation and mlp
+    :param drop_path: drop rate for stochastic depth (SD)
+    :param act_layer: FFN act
+    :param norm_layer: Pre Norm
+
+    :param up_rate: optional expansion ratio of the FFN output dim (mlp_dim_out = dim * up_rate when set)
+    :param kernel_q: pooling kernel size for q
+    :param kernel_kv: pooling kernel size for k and v
+    :param stride_q: pooling kernel stride for q
+    :param stride_kv: pooling kernel stride for k and v
+
+    :param has_cls_embed: if cls token is used
+    :param mode: mode for attention pooling (downsampling). Options include `conv`, `avg`, and `max`.
+ :param pool_first: process pooling(downsampling) before liner projecting + + """ + + def __init__( + self, + dim, + dim_out, + num_heads=8, + mlp_ratio=4.0, + qkv_bias=False, + drop_rate=0.0, + drop_path=0.0, + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + up_rate=None, + kernel_q=(1, 1, 1), + kernel_kv=(1, 1, 1), + stride_q=(1, 1, 1), + stride_kv=(1, 1, 1), + has_cls_embed=True, + mode="conv", + pool_first=False, + ): + super().__init__() + + self.has_cls_embed = has_cls_embed + + # step 1: Attention projecting + self.dim = dim + self.dim_out = dim_out + self.norm1 = norm_layer(dim) # pre-norm + + self.attn = MultiScaleAttention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + drop_rate=drop_rate, + kernel_q=kernel_q, + kernel_kv=kernel_kv, + stride_q=stride_q, + stride_kv=stride_kv, + norm_layer=nn.LayerNorm, + has_cls_embed=self.has_cls_embed, + mode=mode, + pool_first=pool_first, + ) + + self.drop_path = (DropPath(drop_path) if drop_path > 0.0 else nn.Identity()) + + # residual connection for Attention projecting + kernel_skip = kernel_q # fixme ori: [s + 1 if s > 1 else s for s in stride_q] + stride_skip = stride_q + padding_skip = [int(skip // 2) for skip in kernel_skip] # 以半个kernal size进行padding,向下取整 + + self.pool_skip = ( + nn.MaxPool3d(kernel_skip, stride_skip, padding_skip, ceil_mode=False) + if len(kernel_skip) > 0 + else None) + + self.norm2 = norm_layer(dim) # pre-norm + + # step 2: FFN projecting + mlp_hidden_dim = int(dim * mlp_ratio) + + # here use FFN to encode feature into abstractive information in the dimension + # TODO: check the use case for up_rate, and merge the following lines + if up_rate is not None and up_rate > 1: + mlp_dim_out = dim * up_rate + else: + mlp_dim_out = dim_out + + self.mlp = FFN( + in_features=dim, + hidden_features=mlp_hidden_dim, + out_features=mlp_dim_out, + act_layer=act_layer, + drop=drop_rate, + ) + + # residual connection for FFN projecting + if dim != dim_out: + self.proj = nn.Linear(dim, dim_out) + + def forward(self, x, thw_shape): + # step 1: Attention projecting + x_block, thw_shape_new = self.attn(self.norm1(x), thw_shape) + # residual connection for Attention projecting + x_res, _ = attention_pool(x, self.pool_skip, thw_shape, has_cls_embed=self.has_cls_embed) + x = x_res + self.drop_path(x_block) + + # step 2: FFN projecting + x_norm = self.norm2(x) + x_mlp = self.mlp(x_norm) + # residual connection for FFN projecting + if self.dim != self.dim_out: + x = self.proj(x_norm) + x = x + self.drop_path(x_mlp) + + return x, thw_shape_new + + +''' +# case 1 +model = MultiScaleBlock(768,1024) +x = torch.randn(1, 197, 768) +y, thw = model(x, [1, 14, 14]) +print(y.shape) # torch.Size([1, 197, 1024]) + + +# case 2 +kernel_q = (1, 2, 2) +kernel_kv = (1, 2, 2) +stride_q = (1, 2, 2) +stride_kv = (1, 2, 2) +# MultiScaleAttention 中设计以半个kernal size进行padding,向下取整 + +model = MultiScaleBlock(768, 1024, kernel_q=kernel_q, kernel_kv=kernel_kv, stride_q=stride_q, stride_kv=stride_kv) +x = torch.randn(1, 197, 768) +y, thw = model(x, [1, 14, 14]) + +print(y.shape) # 输出torch.Size([1, 65, 1024]):不padding是7*7 由于padding变成8*8, 之后加上cls token +''' + + +class PatchEmbed(nn.Module): # PatchEmbed from timm + """ + Image to Patch Embedding + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) + self.img_size = img_size + self.patch_size = patch_size + 
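+        # e.g. img_size=224, patch_size=16 -> grid of (224 // 16) * (224 // 16) = 14 * 14 = 196
+        # patches, so forward() returns (B, 196, 768) with the default embed_dim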
self.num_patches = num_patches + + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + + def forward(self, x): + B, C, H, W = x.shape + # FIXME look at relaxing size constraints + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." + x = self.proj(x).flatten(2).transpose(1, 2) + + # x: (B, 14*14, 768) + return x + + +class Hybrid_feature_map_Embed(nn.Module): # HybridEmbed from timm + """ + CNN Feature Map Embedding, required backbone which is just for referance here + Extract feature map from CNN, flatten, project to embedding dim. + + # input x.shape = batch, feature_dim, feature_size[0], feature_size[1] + # output x.shape = batch, patch_number, patch_dim + """ + + def __init__(self, backbone, img_size=224, patch_size=1, feature_size=None, feature_dim=None, + in_chans=3, embed_dim=768): + super().__init__() + + assert isinstance(backbone, nn.Module) + + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + self.img_size = img_size + self.patch_size = patch_size + self.backbone = backbone + + if feature_size is None or feature_dim is None: # backbone output feature_size + with torch.no_grad(): + # NOTE Most reliable way of determining output dims is to run forward pass + training = backbone.training + if training: + backbone.eval() + o = self.backbone(torch.zeros(1, in_chans, img_size[0], img_size[1])) + if isinstance(o, (list, tuple)): + o = o[-1] # last feature if backbone outputs list/tuple of features + feature_size = o.shape[-2:] + feature_dim = o.shape[1] + backbone.train(training) + else: + feature_size = to_2tuple(feature_size) + ''' + if hasattr(self.backbone, 'feature_info'): + feature_dim = self.backbone.feature_info.channels()[-1] + else: + feature_dim = self.backbone.num_features + ''' + + assert feature_size[0] % patch_size[0] == 0 and feature_size[1] % patch_size[1] == 0 + + self.grid_size = (feature_size[0] // patch_size[0], feature_size[1] // patch_size[1]) # patchlize + + self.num_patches = self.grid_size[0] * self.grid_size[1] + + self.proj = nn.Conv2d(in_channels=feature_dim, out_channels=embed_dim, + kernel_size=patch_size, stride=patch_size) + + def forward(self, x): + x = self.backbone(x) + if isinstance(x, (list, tuple)): + x = x[-1] # last feature if backbone outputs list/tuple of features + + x = self.proj(x).flatten(2).transpose(1, 2) # shape = ( ) + """ + x.shape: batch, feature_dim, feature_size[0], feature_size[1] + proj(x).shape: batch, embed_dim, patch_height_num, patch_width_num + flatten(2).shape: batch, embed_dim, patch_num + .transpose(1, 2).shape: batch feature_patch_number feature_patch_dim + """ + # output: x.shape = batch, patch_number, patch_dim + return x + + +class Last_feature_map_Embed(nn.Module): + """ + use this block to connect last CNN stage to the first Transformer block + Extract feature map from CNN, flatten, project to embedding dim. 
+ + # input x.shape = batch, feature_dim, feature_size[0], feature_size[1] + # output x.shape = batch, patch_number, patch_dim + """ + + def __init__(self, patch_size=1, feature_size=(7, 7), feature_dim=2048, embed_dim=768, + Attention_module=None): + super().__init__() + + # Attention module + if Attention_module is not None: + if Attention_module == 'SimAM': + self.Attention_module = simam_module(e_lambda=1e-4) + elif Attention_module == 'CBAM': + self.Attention_module = cbam_module(gate_channels=feature_dim) + elif Attention_module == 'SE': + self.Attention_module = se_module(channel=feature_dim) + else: + self.Attention_module = None + + patch_size = to_2tuple(patch_size) + self.patch_size = patch_size + + feature_size = to_2tuple(feature_size) + + # feature map should be matching the size + assert feature_size[0] % self.patch_size[0] == 0 and feature_size[1] % self.patch_size[1] == 0 + + self.grid_size = (feature_size[0] // self.patch_size[0], feature_size[1] // self.patch_size[1]) # patch + + self.num_patches = self.grid_size[0] * self.grid_size[1] + + # use the conv to split the patch by the following design: + self.proj = nn.Conv2d(in_channels=feature_dim, out_channels=embed_dim, + kernel_size=self.patch_size, stride=self.patch_size) + + def forward(self, x): + if self.Attention_module is not None: + x = self.Attention_module(x) + + if isinstance(x, (list, tuple)): + x = x[-1] # last feature if backbone outputs list/tuple of features + + x = self.proj(x).flatten(2).transpose(1, 2) + """ + x.shape: batch, feature_dim, feature_size[0], feature_size[1] + proj(x).shape: batch, embed_dim, patch_height_num, patch_width_num + flatten(2).shape: batch, embed_dim, patch_num + .transpose(1, 2).shape: batch feature_patch_number feature_patch_dim + """ + # output 格式 x.shape = batch, patch_number, patch_dim + return x + + +class Focus_Embed(nn.Module): # Attention guided module for hybridzing the early stages CNN feature + """ + FGD Focus module + Extract feature map from CNN, flatten, project to embedding dim. 
and use them as attention guidance + + input: x.shape = batch, feature_dim, feature_size[0], feature_size[1] + + Firstly, an attention block will be used to stable the feature projecting process + + Secondly, for each feature map,the focus will be 2 path: gaze and glance + in gaze path Max pool will be applied to get prominent information + in glance path Avg pool will be applied to get general information + + after the dual pooling path 2 seperate CNNs will be used to project the dimension + Finally, flattern and transpose will be applied + + output 2 attention guidance: gaze, glance + x.shape = batch, patch_number, patch_dim + + + ref: + ResNet50's feature map from different stages (edge size of 224) + stage 1 output feature map: torch.Size([b, 256, 56, 56]) + stage 2 output feature map: torch.Size([b, 512, 28, 28]) + stage 3 output feature map: torch.Size([b, 1024, 14, 14]) + stage 4 output feature map: torch.Size([b, 2048, 7, 7]) + """ + + def __init__(self, patch_size=1, target_feature_size=(7, 7), feature_size=(56, 56), feature_dim=256, embed_dim=768, + Attention_module=None, norm_layer=nn.LayerNorm): + super().__init__() + patch_size = to_2tuple(patch_size) + feature_size = to_2tuple(feature_size) # patch size of the current feature map + + target_feature_size = to_2tuple(target_feature_size) # patch size of the last feature map + + # cheak feature map can be patchlize to target_feature_size + assert feature_size[0] % target_feature_size[0] == 0 and feature_size[1] % target_feature_size[1] == 0 + + # cheak target_feature map can be patchlize to patch + assert target_feature_size[0] % patch_size[0] == 0 and target_feature_size[1] % patch_size[1] == 0 + + # Attention block + if Attention_module is not None: + if Attention_module == 'SimAM': + self.Attention_module = simam_module(e_lambda=1e-4) + elif Attention_module == 'CBAM': + self.Attention_module = cbam_module(gate_channels=feature_dim) + elif Attention_module == 'SE': + self.Attention_module = se_module(channel=feature_dim) + else: + self.Attention_module = None + + # split focus ROI + self.focus_size = (feature_size[0] // target_feature_size[0], feature_size[1] // target_feature_size[1]) + self.num_focus = self.focus_size[0] * self.focus_size[1] + # by kernel_size=focus_size, stride=focus_size design + # output_size=target_feature_size=7x7 so as to match the minist feature map + + self.gaze = nn.MaxPool2d(self.focus_size, stride=self.focus_size) + self.glance = nn.AvgPool2d(self.focus_size, stride=self.focus_size) + # x.shape: batch, feature_dim, target_feature_size[0], target_feature_size[1] + + # split patch + self.grid_size = (target_feature_size[0] // patch_size[0], target_feature_size[1] // patch_size[1]) + self.num_patches = self.grid_size[0] * self.grid_size[1] + + # use CNN to project dim to patch_dim + self.gaze_proj = nn.Conv2d(in_channels=feature_dim, out_channels=embed_dim, + kernel_size=patch_size, stride=patch_size) + self.glance_proj = nn.Conv2d(in_channels=feature_dim, out_channels=embed_dim, + kernel_size=patch_size, stride=patch_size) + + self.norm_q = norm_layer(embed_dim) # Transformer nn.LayerNorm + self.norm_k = norm_layer(embed_dim) # Transformer nn.LayerNorm + + def forward(self, x): + if self.Attention_module is not None: + x = self.Attention_module(x) + + if isinstance(x, (list, tuple)): + x = x[-1] # last feature if backbone outputs list/tuple of features + + q = self.norm_q(self.gaze_proj(self.gaze(x)).flatten(2).transpose(1, 2)) + k = 
self.norm_k(self.glance_proj(self.glance(x)).flatten(2).transpose(1, 2)) + """ + x.shape: batch, feature_dim, feature_size[0], feature_size[1] + gaze/glance(x).shape: batch, feature_dim, target_feature_size[0], target_feature_size[1] + proj(x).shape: batch, embed_dim, patch_height_num, patch_width_num + flatten(2).shape: batch, embed_dim, patch_num + .transpose(1, 2).shape: batch feature_patch_number feature_patch_dim + """ + # output x.shape = batch, patch_number, patch_dim + return q, k + + +''' +# test sample +model = Focus_Embed() +x = torch.randn(4, 256, 56, 56) +y1,y2 = model(x) +print(y1.shape) +print(y2.shape) +''' + + +class Focus_SEmbed(nn.Module): # Attention guided module for hybridzing the early stages CNN feature + """ + + self focus (q=k) based on FGD Focus block + + Extract feature map from CNN, flatten, project to embedding dim. and use them as attention guidance + + input: x.shape = batch, feature_dim, feature_size[0], feature_size[1] + + Firstly, an attention block will be used to stable the feature projecting process + + Secondly, for each feature map,the focus will be 1 path: glance + in glance path Avg pool will be applied to get general information + + after the pooling process 1 CNN will be used to project the dimension + Finally, flattern and transpose will be applied + + output 2 attention guidance: glance, glance + x.shape = batch, patch_number, patch_dim + """ + + def __init__(self, patch_size=1, target_feature_size=(7, 7), feature_size=(56, 56), feature_dim=256, embed_dim=768, + Attention_module=None, norm_layer=nn.LayerNorm): + super().__init__() + patch_size = to_2tuple(patch_size) + feature_size = to_2tuple(feature_size) + + target_feature_size = to_2tuple(target_feature_size) + + assert feature_size[0] % target_feature_size[0] == 0 and feature_size[1] % target_feature_size[1] == 0 + + assert target_feature_size[0] % patch_size[0] == 0 and target_feature_size[1] % patch_size[1] == 0 + + if Attention_module is not None: + if Attention_module == 'SimAM': + self.Attention_module = simam_module(e_lambda=1e-4) + elif Attention_module == 'CBAM': + self.Attention_module = cbam_module(gate_channels=feature_dim) + elif Attention_module == 'SE': + self.Attention_module = se_module(channel=feature_dim) + else: + self.Attention_module = None + + self.focus_size = (feature_size[0] // target_feature_size[0], feature_size[1] // target_feature_size[1]) + self.num_focus = self.focus_size[0] * self.focus_size[1] + + self.gaze = nn.MaxPool2d(self.focus_size, stride=self.focus_size) + + self.grid_size = (target_feature_size[0] // patch_size[0], target_feature_size[1] // patch_size[1]) + self.num_patches = self.grid_size[0] * self.grid_size[1] + + self.proj = nn.Conv2d(in_channels=feature_dim, out_channels=embed_dim, kernel_size=patch_size, + stride=patch_size) + + self.norm_f = norm_layer(embed_dim) + + def forward(self, x): + if self.Attention_module is not None: + x = self.Attention_module(x) + + if isinstance(x, (list, tuple)): + x = x[-1] # last feature if backbone outputs list/tuple of features + + q = self.norm_f(self.proj(self.gaze(x)).flatten(2).transpose(1, 2)) + k = q + """ + x.shape: batch, feature_dim, feature_size[0], feature_size[1] + gaze/glance(x).shape: batch, feature_dim, target_feature_size[0], target_feature_size[1] + proj(x).shape: batch, embed_dim, patch_height_num, patch_width_num + flatten(2).shape: batch, embed_dim, patch_num + .transpose(1, 2).shape: batch feature_patch_number feature_patch_dim + """ + # output x.shape = batch, patch_number, patch_dim 
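+        # worked shape sketch (with the default sizes, as in the Focus_Embed test above):
+        # x (B, 256, 56, 56) -> gaze MaxPool2d(8, stride=8) -> (B, 256, 7, 7)
+        # -> 1x1 conv proj -> (B, 768, 7, 7) -> flatten/transpose -> (B, 49, 768) = q = k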
+ return q, k + + +class Focus_Aggressive(nn.Module): # Attention guided module for hybridzing the early stages CNN feature + """ + Aggressive CNN Focus based on FGD Focus block + + Extract feature map from CNN, flatten, project to embedding dim. and use them as attention guidance + + input: x.shape = batch, feature_dim, feature_size[0], feature_size[1] + + Firstly, an attention block will be used to stable the feature projecting process + + Secondly, 2 CNNs will be used to project the dimension + + Finally, flattern and transpose will be applied + + output 2 attention guidance: gaze, glance + x.shape = batch, patch_number, patch_dim + + """ + + def __init__(self, patch_size=1, target_feature_size=(7, 7), feature_size=(56, 56), feature_dim=256, embed_dim=768, + Attention_module=None, norm_layer=nn.LayerNorm): + super().__init__() + patch_size = to_2tuple(patch_size) # patch size of the last feature map + feature_size = to_2tuple(feature_size) + + target_feature_size = to_2tuple(target_feature_size) + + assert feature_size[0] % target_feature_size[0] == 0 and feature_size[1] % target_feature_size[1] == 0 + + assert target_feature_size[0] % patch_size[0] == 0 and target_feature_size[1] % patch_size[1] == 0 + + if Attention_module is not None: + if Attention_module == 'SimAM': + self.Attention_module = simam_module(e_lambda=1e-4) + elif Attention_module == 'CBAM': + self.Attention_module = cbam_module(gate_channels=feature_dim) + elif Attention_module == 'SE': + self.Attention_module = se_module(channel=feature_dim) + else: + self.Attention_module = None + + self.focus_size = (feature_size[0] // target_feature_size[0], feature_size[1] // target_feature_size[1]) + + self.grid_size = (self.focus_size[0] * patch_size[0], self.focus_size[1] * patch_size[1]) + self.num_patches = (feature_size[0] // self.grid_size[0]) * (feature_size[1] // self.grid_size[1]) + + self.gaze_proj = nn.Conv2d(in_channels=feature_dim, out_channels=embed_dim, + kernel_size=self.grid_size, stride=self.grid_size) + self.glance_proj = nn.Conv2d(in_channels=feature_dim, out_channels=embed_dim, + kernel_size=self.grid_size, stride=self.grid_size) + + self.norm_q = norm_layer(embed_dim) + self.norm_k = norm_layer(embed_dim) + + def forward(self, x): + if self.Attention_module is not None: + x = self.Attention_module(x) + + if isinstance(x, (list, tuple)): + x = x[-1] # last feature if backbone outputs list/tuple of features + + q = self.norm_q(self.gaze_proj(x).flatten(2).transpose(1, 2)) + k = self.norm_k(self.glance_proj(x).flatten(2).transpose(1, 2)) + """ + x.shape: batch, feature_dim, feature_size[0], feature_size[1] + proj(x).shape: batch, embed_dim, patch_height_num, patch_width_num + flatten(2).shape: batch, embed_dim, patch_num + .transpose(1, 2).shape: batch feature_patch_number feature_patch_dim + """ + # output x.shape = batch, patch_number, patch_dim + return q, k + + +class Focus_SAggressive(nn.Module): # Attention guided module for hybridzing the early stages CNN feature + """ + Aggressive CNN self Focus + Extract feature map from CNN, flatten, project to embedding dim. 
and use them as attention guidance + + input: x.shape = batch, feature_dim, feature_size[0], feature_size[1] + + Firstly, an attention block will be used to stable the feature projecting process + + Secondly, 1 CNN will be used to project the dimension + + Finally, flattern and transpose will be applied + + output 2 attention guidance: glance, glance + x.shape = batch, patch_number, patch_dim + """ + + def __init__(self, patch_size=1, target_feature_size=(7, 7), feature_size=(56, 56), feature_dim=256, embed_dim=768, + Attention_module=None, norm_layer=nn.LayerNorm): + super().__init__() + patch_size = to_2tuple(patch_size) + feature_size = to_2tuple(feature_size) + + target_feature_size = to_2tuple(target_feature_size) + + assert feature_size[0] % target_feature_size[0] == 0 and feature_size[1] % target_feature_size[1] == 0 + + assert target_feature_size[0] % patch_size[0] == 0 and target_feature_size[1] % patch_size[1] == 0 + + if Attention_module is not None: + if Attention_module == 'SimAM': + self.Attention_module = simam_module(e_lambda=1e-4) + elif Attention_module == 'CBAM': + self.Attention_module = cbam_module(gate_channels=feature_dim) + elif Attention_module == 'SE': + self.Attention_module = se_module(channel=feature_dim) + else: + self.Attention_module = None + + self.focus_size = (feature_size[0] // target_feature_size[0], feature_size[1] // target_feature_size[1]) + + self.grid_size = (self.focus_size[0] * patch_size[0], self.focus_size[1] * patch_size[1]) + self.num_patches = (feature_size[0] // self.grid_size[0]) * (feature_size[1] // self.grid_size[1]) + + self.proj = nn.Conv2d(in_channels=feature_dim, out_channels=embed_dim, + kernel_size=self.grid_size, stride=self.grid_size) + + self.norm_f = norm_layer(embed_dim) + + def forward(self, x): + if self.Attention_module is not None: + x = self.Attention_module(x) + + if isinstance(x, (list, tuple)): + x = x[-1] # last feature if backbone outputs list/tuple of features + + q = self.norm_f(self.proj(x).flatten(2).transpose(1, 2)) + k = q + """ + x.shape: batch, feature_dim, feature_size[0], feature_size[1] + proj(x).shape: batch, embed_dim, patch_height_num, patch_width_num + flatten(2).shape: batch, embed_dim, patch_num + .transpose(1, 2).shape: batch feature_patch_number feature_patch_dim + """ + # output x.shape = batch, patch_number, patch_dim + return q, k + + +class VisionTransformer(nn.Module): # From timm to review the ViT and ViT_resn5 + """ + Vision Transformer + A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` + - https://arxiv.org/abs/2010.11929 + Includes distillation token & head support for `DeiT: Data-efficient Image Transformers` + - https://arxiv.org/abs/2012.12877 + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, drop_rate=0., attn_drop_rate=0., + drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None, act_layer=None): + """ + Args: + img_size (int, tuple): input image size + patch_size (int, tuple): patch size + in_chans (int): number of input channels + num_classes (int): number of classes for classification head + embed_dim (int): embedding dimension + depth (int): depth of transformer + num_heads (int): number of attention heads + mlp_ratio (int): ratio of mlp hidden dim to embedding dim + qkv_bias (bool): enable bias for qkv if True + representation_size (Optional[int]): enable and set representation layer (pre-logits) 
                to this value if set
+            drop_rate (float): dropout rate
+            attn_drop_rate (float): attention dropout rate
+            drop_path_rate (float): stochastic depth rate
+            embed_layer (nn.Module): patch embedding layer
+            norm_layer (nn.Module): normalization layer
+        """
+        super().__init__()
+        self.num_classes = num_classes
+        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
+        self.num_tokens = 1
+        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
+        act_layer = act_layer or nn.GELU
+
+        self.patch_embed = embed_layer(
+            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
+        num_patches = self.patch_embed.num_patches
+
+        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
+        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))
+        self.pos_drop = nn.Dropout(p=drop_rate)
+
+        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
+
+        self.blocks = nn.Sequential(*[
+            Encoder_Block(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,
+                          attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer)
+            for i in range(depth)])
+
+        self.norm = norm_layer(embed_dim)
+
+        # Representation layer
+        if representation_size:
+            self.num_features = representation_size
+            self.pre_logits = nn.Sequential(OrderedDict([
+                ('fc', nn.Linear(embed_dim, representation_size)),
+                ('act', nn.Tanh())
+            ]))
+        else:
+            self.pre_logits = nn.Identity()
+
+        # Classifier head(s)
+        self.head = nn.Linear(self.num_features, self.num_classes) if self.num_classes > 0 else nn.Identity()
+        self.head_dist = None
+
+    def forward_features(self, x):
+        x = self.patch_embed(x)
+        # print(x.shape, self.pos_embed.shape)
+        cls_token = self.cls_token.expand(x.shape[0], -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
+        x = torch.cat((cls_token, x), dim=1)
+        x = self.pos_drop(x + self.pos_embed)
+
+        x = self.blocks(x)
+        x = self.norm(x)
+        return self.pre_logits(x[:, 0])  # use cls token for cls head
+
+    def forward(self, x):
+        x = self.forward_features(x)
+        x = self.head(x)
+        return x
+
+
+class Stage_wise_hybrid_Transformer(nn.Module):
+    """
+    MSHT: Multi Stage Backbone Transformer
+    A stem + 4 ResNet stages (the backbone) serve as the feature extractor;
+    patch embedding of the last feature map then connects the CNN output to the input of decoder 1.
+
+    Horizontally, each of the 4 ResNet stages feeds its feature map into a Focus module,
+    which provides the attention guidance for the corresponding FGD decoder.
+    """
+
+    def __init__(self, backbone, num_classes=1000, patch_size=1, embed_dim=768, depth=4, num_heads=8, mlp_ratio=4.,
+                 qkv_bias=True, representation_size=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.,
+                 use_cls_token=True, use_pos_embedding=True, use_att_module='SimAM', stage_size=(56, 28, 14, 7),
+                 stage_dim=(256, 512, 1024, 2048), norm_layer=None, act_layer=None):
+        """
+        Args:
+            backbone (nn.Module): input backbone = stem + 4 ResNet stages
+            num_classes (int): number of classes for classification head
+            patch_size (int, tuple): patch size
+            embed_dim (int): embedding dimension
+            depth (int): depth of transformer
+            num_heads (int): number of attention heads
+            mlp_ratio (float): ratio of mlp hidden dim to embedding dim
+            qkv_bias (bool): enable bias for qkv if True
+            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
+            drop_rate (float): dropout rate
+            attn_drop_rate (float): attention dropout rate
+ drop_path_rate (float): stochastic depth rate + + use_cls_token(bool): classification token + use_pos_embedding(bool): use positional embedding + use_att_module(str or None): use which attention module in embedding + + stage_size (int, tuple): the stage feature map size of ResNet stages + stage_dim (int, tuple): the stage feature map dimension of ResNet stages + norm_layer: (nn.Module): normalization layer + """ + super().__init__() + self.num_classes = num_classes + if len(stage_dim) != len(stage_size): + raise TypeError('stage_dim and stage_size mismatch!') + else: + self.stage_num = len(stage_dim) + + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + + self.cls_token_num = 1 if use_cls_token else 0 + self.use_pos_embedding = use_pos_embedding + + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + act_layer = act_layer or nn.GELU + + # backbone CNN + self.backbone = backbone + + # Attention module + if use_att_module is not None: + if use_att_module in ['SimAM', 'CBAM', 'SE']: + Attention_module = use_att_module + else: + Attention_module = None + else: + Attention_module = None + + self.patch_embed = Last_feature_map_Embed(patch_size=patch_size, feature_size=stage_size[-1], + feature_dim=stage_dim[-1], embed_dim=self.embed_dim, + Attention_module=Attention_module) + num_patches = self.patch_embed.num_patches + + # global sharing cls token and positional embedding + self.cls_token_0 = nn.Parameter(torch.zeros(1, 1, embed_dim)) # like message token + if self.use_pos_embedding: + self.pos_embed_0 = nn.Parameter(torch.zeros(1, num_patches + self.cls_token_num, embed_dim)) + + ''' + self.cls_token_1 = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.pos_embed_1 = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim)) + + self.cls_token_2 = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.pos_embed_2 = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim)) + + self.cls_token_3 = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.pos_embed_3 = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim)) + + self.cls_token_4 = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.pos_embed_4 = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim)) + ''' + + self.pos_drop = nn.Dropout(p=drop_rate) + # stochastic depth + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + + self.dec1 = Decoder_Block(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[0], norm_layer=norm_layer, + act_layer=act_layer) + self.Fo1 = Focus_Embed(patch_size=patch_size, target_feature_size=stage_size[-1], feature_size=stage_size[0], + feature_dim=stage_dim[0], embed_dim=embed_dim, Attention_module=Attention_module, + norm_layer=norm_layer) + + self.dec2 = Decoder_Block(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[1], norm_layer=norm_layer, + act_layer=act_layer) + self.Fo2 = Focus_Embed(patch_size=patch_size, target_feature_size=stage_size[-1], feature_size=stage_size[1], + feature_dim=stage_dim[1], embed_dim=embed_dim, Attention_module=Attention_module, + norm_layer=norm_layer) + + self.dec3 = Decoder_Block(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[2], norm_layer=norm_layer, + act_layer=act_layer) + 
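+        # each (dec_i, Fo_i) pair couples one FGD decoder with the Focus module that
+        # embeds the matching ResNet stage: Fo_i turns the stage-i feature map into
+        # (q, k) guidance token sequences which, after the shared cls token and
+        # positional embedding are applied in forward_features, steer dec_i while it
+        # refines the running token sequence x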
self.Fo3 = Focus_Embed(patch_size=patch_size, target_feature_size=stage_size[-1], feature_size=stage_size[2], + feature_dim=stage_dim[2], embed_dim=embed_dim, Attention_module=Attention_module, + norm_layer=norm_layer) + + if self.stage_num == 4: + self.dec4 = Decoder_Block(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[3], norm_layer=norm_layer, + act_layer=act_layer) + self.Fo4 = Focus_Embed(patch_size=patch_size, target_feature_size=stage_size[-1], + feature_size=stage_size[-1], + feature_dim=stage_dim[-1], embed_dim=embed_dim, Attention_module=Attention_module, + norm_layer=norm_layer) + + self.norm = norm_layer(embed_dim) + + # Representation layer + if representation_size: + self.num_features = representation_size + self.pre_logits = nn.Sequential(OrderedDict([ + ('fc', nn.Linear(embed_dim, representation_size)), + ('act', nn.Tanh()) + ])) + else: + self.pre_logits = nn.Identity() + + # Classifier head(s) + self.head = nn.Linear(self.num_features, self.num_classes) if self.num_classes > 0 else nn.Identity() + self.head_dist = None + + def forward_features(self, x): + if self.stage_num == 3: + stage1_out, stage2_out, stage3_out = self.backbone(x) + # embedding the last feature map + x = self.patch_embed(stage3_out) + + elif self.stage_num == 4: + stage1_out, stage2_out, stage3_out, stage4_out = self.backbone(x) + # embedding the last feature map + x = self.patch_embed(stage4_out) + else: + raise TypeError('stage_dim is not legal !') + + # get guidance info + s1_q, s1_k = self.Fo1(stage1_out) + s2_q, s2_k = self.Fo2(stage2_out) + s3_q, s3_k = self.Fo3(stage3_out) + if self.stage_num == 4: + s4_q, s4_k = self.Fo4(stage4_out) + + if self.cls_token_num != 0: # concat cls token + # process the(cls token / message token) + cls_token_0 = self.cls_token_0.expand(x.shape[0], -1, -1) # stole cls_tokens impl from Phil Wang, thanks + x = torch.cat((cls_token_0, x), dim=1) # 增加classification head patch + + s1_q = torch.cat((cls_token_0, s1_q), dim=1) + s1_k = torch.cat((cls_token_0, s1_k), dim=1) + s2_q = torch.cat((cls_token_0, s2_q), dim=1) + s2_k = torch.cat((cls_token_0, s2_k), dim=1) + s3_q = torch.cat((cls_token_0, s3_q), dim=1) + s3_k = torch.cat((cls_token_0, s3_k), dim=1) + if self.stage_num == 4: + s4_q = torch.cat((cls_token_0, s4_q), dim=1) + s4_k = torch.cat((cls_token_0, s4_k), dim=1) + + if self.use_pos_embedding: + + s1_q = self.pos_drop(s1_q + self.pos_embed_0) + s1_k = self.pos_drop(s1_k + self.pos_embed_0) + s2_q = self.pos_drop(s2_q + self.pos_embed_0) + s2_k = self.pos_drop(s2_k + self.pos_embed_0) + s3_q = self.pos_drop(s3_q + self.pos_embed_0) + s3_k = self.pos_drop(s3_k + self.pos_embed_0) + if self.stage_num == 4: + s4_q = self.pos_drop(s4_q + self.pos_embed_0) + s4_k = self.pos_drop(s4_k + self.pos_embed_0) + + # plus to encoding positional infor + x = self.pos_drop(x + self.pos_embed_0) + + else: + + s1_q = self.pos_drop(s1_q) + s1_k = self.pos_drop(s1_k) + s2_q = self.pos_drop(s2_q) + s2_k = self.pos_drop(s2_k) + s3_q = self.pos_drop(s3_q) + s3_k = self.pos_drop(s3_k) + if self.stage_num == 4: + s4_q = self.pos_drop(s4_q) + s4_k = self.pos_drop(s4_k) + + # stem's feature map + x = self.pos_drop(x) + + # Decoder module use the guidance to help global modeling process + + x = self.dec1(s1_q, s1_k, x) + + x = self.dec2(s2_q, s2_k, x) + + x = self.dec3(s3_q, s3_k, x) + + if self.stage_num == 4: + x = self.dec4(s4_q, s4_k, x) + + x = self.norm(x) + return self.pre_logits(x[:, 0]) # take 
the first cls token + + def forward(self, x): + x = self.forward_features(x) # connect the cls token to the cls head + x = self.head(x) + return x diff --git a/PuzzleTuning/Backbone/VPT_structure.py b/PuzzleTuning/Backbone/VPT_structure.py new file mode 100644 index 0000000000000000000000000000000000000000..5c8802c71295c7a20103bdc0cf19484a7194e84a --- /dev/null +++ b/PuzzleTuning/Backbone/VPT_structure.py @@ -0,0 +1,133 @@ +""" +VPT Script ver: Oct 17th 14:30 + +based on +timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm +""" + +import torch +import torch.nn as nn + +from timm.models.vision_transformer import VisionTransformer, PatchEmbed + + +class VPT_ViT(VisionTransformer): + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., + embed_layer=PatchEmbed, norm_layer=None, act_layer=None, Prompt_Token_num=1, + VPT_type="Shallow", basic_state_dict=None): + + # Recreate ViT + super().__init__(img_size=img_size, patch_size=patch_size, in_chans=in_chans, num_classes=num_classes, + embed_dim=embed_dim, depth=depth, num_heads=num_heads, mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, + drop_path_rate=drop_path_rate, embed_layer=embed_layer, + norm_layer=norm_layer, act_layer=act_layer) + + # load basic state_dict + if basic_state_dict is not None: + self.load_state_dict(basic_state_dict, False) + + self.VPT_type = VPT_type + if VPT_type == "Deep": + self.Prompt_Tokens = nn.Parameter(torch.zeros(depth, Prompt_Token_num, embed_dim)) + else: # "Shallow" + self.Prompt_Tokens = nn.Parameter(torch.zeros(1, Prompt_Token_num, embed_dim)) + + def New_CLS_head(self, new_classes=15): + if new_classes != 0: + self.head = nn.Linear(self.embed_dim, new_classes) + else: + self.head = nn.Identity() + + def Freeze(self): + for param in self.parameters(): + param.requires_grad = False + + self.Prompt_Tokens.requires_grad = True + try: + for param in self.head.parameters(): + param.requires_grad = True + except: + pass + + def UnFreeze(self): + for param in self.parameters(): + param.requires_grad = True + + def obtain_prompt(self): + prompt_state_dict = {'head': self.head.state_dict(), + 'Prompt_Tokens': self.Prompt_Tokens} + # print(prompt_state_dict) + return prompt_state_dict + + def load_prompt(self, prompt_state_dict): + try: + self.head.load_state_dict(prompt_state_dict['head'], False) + except: + print('head not match, so skip head') + else: + print('prompt head match') + + if self.Prompt_Tokens.shape == prompt_state_dict['Prompt_Tokens'].shape: + + # device check + Prompt_Tokens = nn.Parameter(prompt_state_dict['Prompt_Tokens'].cpu()) + Prompt_Tokens.to(torch.device(self.Prompt_Tokens.device)) + + self.Prompt_Tokens = Prompt_Tokens + + else: + print('\n !!! 
cannot load prompt') + print('shape of model req prompt', self.Prompt_Tokens.shape) + print('shape of model given prompt', prompt_state_dict['Prompt_Tokens'].shape) + print('') + + def forward_features(self, x): + x = self.patch_embed(x) + # print(x.shape,self.pos_embed.shape) + cls_token = self.cls_token.expand(x.shape[0], -1, -1) + + # concatenate CLS token + x = torch.cat((cls_token, x), dim=1) + x = self.pos_drop(x + self.pos_embed) + + if self.VPT_type == "Deep": + + Prompt_Token_num = self.Prompt_Tokens.shape[1] + + for i in range(len(self.blocks)): + # concatenate Prompt_Tokens + Prompt_Tokens = self.Prompt_Tokens[i].unsqueeze(0) + # firstly concatenate + x = torch.cat((x, Prompt_Tokens.expand(x.shape[0], -1, -1)), dim=1) + num_tokens = x.shape[1] + # lastly remove, a genius trick + x = self.blocks[i](x)[:, :num_tokens - Prompt_Token_num] + + else: # self.VPT_type == "Shallow" + Prompt_Token_num = self.Prompt_Tokens.shape[1] + + # concatenate Prompt_Tokens + Prompt_Tokens = self.Prompt_Tokens.expand(x.shape[0], -1, -1) + x = torch.cat((x, Prompt_Tokens), dim=1) + num_tokens = x.shape[1] + # Sequntially procees + x = self.blocks(x)[:, :num_tokens - Prompt_Token_num] + + x = self.norm(x) + return x + + def forward(self, x): + + x = self.forward_features(x) + + # use cls token for cls head + try: + x = self.pre_logits(x[:, 0, :]) + except: + x = self.fc_norm(x[:, 0, :]) + else: + pass + x = self.head(x) + return x diff --git a/PuzzleTuning/Backbone/attention_modules.py b/PuzzleTuning/Backbone/attention_modules.py new file mode 100644 index 0000000000000000000000000000000000000000..207b2cbc4f08b996489e4fc757a3afdb08194025 --- /dev/null +++ b/PuzzleTuning/Backbone/attention_modules.py @@ -0,0 +1,303 @@ +""" +attention modules in ['SimAM', 'CBAM', 'SE', 'GAM'] were applied in the ablation study + +ver: Dec 24th 15:00 + + +ref: +https://github.com/xmu-xiaoma666/External-Attention-pytorch +""" + +import torch +import torch.nn as nn +import math +import torch.nn.functional as F +from torch.nn import init + + +# help func +class BasicConv(nn.Module): + def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, + bn=True, bias=False): + super(BasicConv, self).__init__() + self.out_channels = out_planes + self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, + dilation=dilation, groups=groups, bias=bias) + self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None + self.relu = nn.ReLU() if relu else None + + def forward(self, x): + x = self.conv(x) + if self.bn is not None: + x = self.bn(x) + if self.relu is not None: + x = self.relu(x) + return x + + +class Flatten(nn.Module): + def forward(self, x): + return x.view(x.size(0), -1) + + +class ChannelGate(nn.Module): + def __init__(self, gate_channels, reduction_ratio=16, pool_types=['avg', 'max']): + super(ChannelGate, self).__init__() + self.gate_channels = gate_channels + self.mlp = nn.Sequential( + Flatten(), + nn.Linear(gate_channels, int(gate_channels // reduction_ratio)), + nn.ReLU(), + nn.Linear(int(gate_channels // reduction_ratio), gate_channels) + ) + self.pool_types = pool_types + + def forward(self, x): + channel_att_sum = None + for pool_type in self.pool_types: + if pool_type == 'avg': + avg_pool = F.avg_pool2d(x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3))) + channel_att_raw = self.mlp(avg_pool) + elif pool_type == 'max': + max_pool = F.max_pool2d(x, (x.size(2), x.size(3)), 
stride=(x.size(2), x.size(3))) + channel_att_raw = self.mlp(max_pool) + elif pool_type == 'lp': + lp_pool = F.lp_pool2d(x, 2, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3))) + channel_att_raw = self.mlp(lp_pool) + elif pool_type == 'lse': + # LSE pool only + lse_pool = logsumexp_2d(x) + channel_att_raw = self.mlp(lse_pool) + + if channel_att_sum is None: + channel_att_sum = channel_att_raw + else: + channel_att_sum = channel_att_sum + channel_att_raw + + scale = F.sigmoid(channel_att_sum).unsqueeze(2).unsqueeze(3).expand_as(x) + return x * scale + + +def logsumexp_2d(tensor): + tensor_flatten = tensor.view(tensor.size(0), tensor.size(1), -1) + s, _ = torch.max(tensor_flatten, dim=2, keepdim=True) + outputs = s + (tensor_flatten - s).exp().sum(dim=2, keepdim=True).log() + return outputs + + +class ChannelPool(nn.Module): + def forward(self, x): + return torch.cat((torch.max(x, 1)[0].unsqueeze(1), torch.mean(x, 1).unsqueeze(1)), dim=1) + + +class SpatialGate(nn.Module): + def __init__(self): + super(SpatialGate, self).__init__() + kernel_size = 7 + self.compress = ChannelPool() + self.spatial = BasicConv(2, 1, kernel_size, stride=1, padding=int((kernel_size - 1) // 2), relu=False) + + def forward(self, x): + x_compress = self.compress(x) + x_out = self.spatial(x_compress) + scale = F.sigmoid(x_out) # broadcasting + return x * scale + + +# attention modules: +class cbam_module(nn.Module): + """ + module:CBAM + + input、output= b, c, h, w + + paper: + https://arxiv.org/abs/1807.06521 + code: + https://github.com/ZjjConan/SimAM/blob/master/networks/attentions + """ + + def __init__(self, gate_channels, reduction=16, pool_types=['avg', 'max'], no_spatial=False): + super(cbam_module, self).__init__() + self.ChannelGate = ChannelGate(gate_channels, reduction, pool_types) + self.no_spatial = no_spatial + if not no_spatial: + self.SpatialGate = SpatialGate() + + @staticmethod + def get_module_name(): + return "cbam" + + def forward(self, x): + x_out = self.ChannelGate(x) + if not self.no_spatial: + x_out = self.SpatialGate(x_out) + return x_out + + +class se_module(nn.Module): + """ + module: SE + + input、output= b, c, h, w + + from paper Squeeze-and-Excitation Networks + SE-Net https://arxiv.org/abs/1709.01507 + code: + https://github.com/ZjjConan/SimAM/blob/master/networks/attentions + """ + + def __init__(self, channel, reduction=16): + super(se_module, self).__init__() + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Sequential( + nn.Linear(channel, int(channel // reduction), bias=False), + nn.ReLU(inplace=True), + nn.Linear(int(channel // reduction), channel, bias=False), + nn.Sigmoid() + ) + + @staticmethod + def get_module_name(): + return "se" + + def forward(self, x): + b, c, _, _ = x.size() + y = self.avg_pool(x).view(b, c) + y = self.fc(y).view(b, c, 1, 1) + return x * y + + +class simam_module(torch.nn.Module): + """ + module:SimAM + + input、output= b, c, h, w + + paper:(ICML) + SimAM: A Simple, Parameter-Free Attention Module for Convolutional Neural Networks + code: + https://github.com/ZjjConan/SimAM/blob/master/networks/attentions/simam_module.py + """ + + def __init__(self, channels=None, e_lambda=1e-4): + super(simam_module, self).__init__() + + self.activaton = nn.Sigmoid() + self.e_lambda = e_lambda + + def __repr__(self): + s = self.__class__.__name__ + '(' + s += ('lambda=%f)' % self.e_lambda) + return s + + @staticmethod + def get_module_name(): + return "simam" + + def forward(self, x): + b, c, h, w = x.size() + + n = w * h - 1 + + x_minus_mu_square = (x - 
x.mean(dim=[2, 3], keepdim=True)).pow(2) + y = x_minus_mu_square / (4 * (x_minus_mu_square.sum(dim=[2, 3], keepdim=True) / n + self.e_lambda)) + 0.5 + + return x * self.activaton(y) + + +class ResidualAttention(nn.Module): + """ + module: ResidualAttention + + input、output= b, c, h, w + + Paper:ICCV 2021 Residual Attention: A Simple but Effective Method for Multi-Label Recognition + code:https://github.com/xmu-xiaoma666/External-Attention-pytorch/blob/master/attention/ResidualAttention.py + """ + + def __init__(self, channel=512, num_class=1000, la=0.2): + super().__init__() + self.la = la + self.fc = nn.Conv2d(in_channels=channel, out_channels=num_class, kernel_size=1, stride=1, bias=False) + + def forward(self, x): + b, c, h, w = x.shape + y_raw = self.fc(x).flatten(2) # b,num_class,hxw + y_avg = torch.mean(y_raw, dim=2) # b,num_class + y_max = torch.max(y_raw, dim=2)[0] # b,num_class + score = y_avg + self.la * y_max + return score + + +class eca_module(nn.Module): + """Constructs a ECA module. + + Args: + channel: Number of channels of the input feature map + k_size: Adaptive selection of kernel size + """ + def __init__(self, channel, k_size=3): + super(eca_module, self).__init__() + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.conv = nn.Conv1d(1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False) + self.sigmoid = nn.Sigmoid() + + def forward(self, x): + # x: input features with shape [b, c, h, w] + b, c, h, w = x.size() + + # feature descriptor on the global spatial information + y = self.avg_pool(x) + + # Two different branches of ECA module + y = self.conv(y.squeeze(-1).transpose(-1, -2)).transpose(-1, -2).unsqueeze(-1) + + # Multi-scale information fusion + y = self.sigmoid(y) + + return x * y.expand_as(x) + + +class GAM_Attention(nn.Module): + """ + module:GAM + + input= b, in_channels, h, w + output= b, out_channels, h, w + + paper: + Global Attention Mechanism: Retain Information to Enhance Channel-Spatial Interactions + https://arxiv.org/abs/2112.05561 + code: + https://mp.weixin.qq.com/s/VL6rXjyUDmHToYTqM32hUg + """ + def __init__(self, in_channels, out_channels, rate=4): + super(GAM_Attention, self).__init__() + + self.channel_attention = nn.Sequential( + nn.Linear(in_channels, int(in_channels / rate)), + nn.ReLU(inplace=True), + nn.Linear(int(in_channels / rate), in_channels) + ) + + self.spatial_attention = nn.Sequential( + nn.Conv2d(in_channels, int(in_channels / rate), kernel_size=7, padding=3), + nn.BatchNorm2d(int(in_channels / rate)), + nn.ReLU(inplace=True), + nn.Conv2d(int(in_channels / rate), out_channels, kernel_size=7, padding=3), + nn.BatchNorm2d(out_channels) + ) + + def forward(self, x): + b, c, h, w = x.shape + x_permute = x.permute(0, 2, 3, 1).view(b, -1, c) + x_att_permute = self.channel_attention(x_permute).view(b, h, w, c) + x_channel_att = x_att_permute.permute(0, 3, 1, 2) + + x = x * x_channel_att + + x_spatial_att = self.spatial_attention(x).sigmoid() + out = x * x_spatial_att + + return out diff --git a/PuzzleTuning/Backbone/counterpart_models/README.md b/PuzzleTuning/Backbone/counterpart_models/README.md new file mode 100644 index 0000000000000000000000000000000000000000..cb3ec6bc9f3d5afddec97e5c5cb6952e43745187 --- /dev/null +++ b/PuzzleTuning/Backbone/counterpart_models/README.md @@ -0,0 +1,25 @@ +Recent SOTA works in fine-grained Tasks + + +CrossFormer + +Paper: +https://arxiv.org/pdf/2108.00154.pdf + + +Code from: +https://github.com/cheerss/CrossFormer + + + +Conformer +Paper: +https://arxiv.org/pdf/2105.03889.pdf + + +Code from: 
+https://github.com/pengzhiliang/Conformer/blob/main/conformer.py + + +both work will be compared with official pretrained backbone +and a new MLP head (classification head). \ No newline at end of file diff --git a/PuzzleTuning/Backbone/counterpart_models/conformer.py b/PuzzleTuning/Backbone/counterpart_models/conformer.py new file mode 100644 index 0000000000000000000000000000000000000000..2ca3fcba4a6f815be98545b9e9594ce58fb84d10 --- /dev/null +++ b/PuzzleTuning/Backbone/counterpart_models/conformer.py @@ -0,0 +1,483 @@ +""" +From Conformer with alter: conv and trans cls head was changed to volting together +ver: DEC 1st 16:00 official release + +ref: https://github.com/pengzhiliang/Conformer/blob/main/conformer.py +""" + +import torch +import torch.nn as nn +import torch.nn.functional as F +from functools import partial + +from timm.models.layers import DropPath, trunc_normal_ + + +class Mlp(nn.Module): # FFN + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Module): # MHSA + def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights + self.scale = qk_scale or head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape # N is patch number, C is patch dimension + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) # re arrange + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): # Encoder from ViT + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=partial(nn.LayerNorm, eps=1e-6)): + super().__init__() + # pre norm 1 + self.norm1 = norm_layer(dim) + # MHSA + self.attn = Attention( + dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) + + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + # pre norm 2 + self.norm2 = norm_layer(dim) + + # FFN(MLP) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x): + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class ConvBlock(nn.Module): # ResNet bottleneck Convblock actually + + def __init__(self, inplanes, outplanes, stride=1, res_conv=False, act_layer=nn.ReLU, groups=1, + norm_layer=partial(nn.BatchNorm2d, eps=1e-6), drop_block=None, drop_path=None): + super(ConvBlock, self).__init__() + + expansion = 4 + med_planes = outplanes // expansion + + self.conv1 = nn.Conv2d(inplanes, med_planes, kernel_size=1, stride=1, padding=0, bias=False) + self.bn1 = norm_layer(med_planes) + self.act1 = act_layer(inplace=True) + + self.conv2 = nn.Conv2d(med_planes, med_planes, kernel_size=3, stride=stride, groups=groups, padding=1, + bias=False) + self.bn2 = norm_layer(med_planes) + self.act2 = act_layer(inplace=True) + + self.conv3 = nn.Conv2d(med_planes, outplanes, kernel_size=1, stride=1, padding=0, bias=False) + self.bn3 = norm_layer(outplanes) + self.act3 = act_layer(inplace=True) + + if res_conv: + self.residual_conv = nn.Conv2d(inplanes, outplanes, kernel_size=1, stride=stride, padding=0, bias=False) + self.residual_bn = norm_layer(outplanes) + + self.res_conv = res_conv + self.drop_block = drop_block + self.drop_path = drop_path + + def zero_init_last_bn(self): + nn.init.zeros_(self.bn3.weight) + + def forward(self, x, x_t=None, return_x_2=True): + residual = x + + x = self.conv1(x) + x = self.bn1(x) + if self.drop_block is not None: + x = self.drop_block(x) + x = self.act1(x) + + x = self.conv2(x) if x_t is None else self.conv2(x + x_t) + x = self.bn2(x) + if self.drop_block is not None: + x = self.drop_block(x) + x2 = self.act2(x) + + x = self.conv3(x2) + x = self.bn3(x) + if self.drop_block is not None: + x = self.drop_block(x) + + if self.drop_path is not None: + x = self.drop_path(x) + + if self.res_conv: + residual = self.residual_conv(residual) + residual = self.residual_bn(residual) + + x += residual + x = self.act3(x) + + if return_x_2: + return x, x2 + else: + return x + + +class FCUDown(nn.Module): + """ CNN feature maps -> Transformer patch embeddings + """ + + def __init__(self, inplanes, outplanes, dw_stride, act_layer=nn.GELU, + norm_layer=partial(nn.LayerNorm, eps=1e-6)): + super(FCUDown, self).__init__() + self.dw_stride = dw_stride + + self.conv_project = nn.Conv2d(inplanes, outplanes, kernel_size=1, stride=1, padding=0) # fix dimension + self.sample_pooling = nn.AvgPool2d(kernel_size=dw_stride, stride=dw_stride) # fix feature map size + + self.ln = norm_layer(outplanes) + self.act = act_layer() + + def forward(self, x, x_t): + x = self.conv_project(x) # [N, C, H, W] + + x = self.sample_pooling(x).flatten(2).transpose(1, 2) + x = self.ln(x) + x = self.act(x) + + x = torch.cat([x_t[:, 0][:, None, :], x], dim=1) # concatenate class token from x_t + + return x + + +class FCUUp(nn.Module): + """ Transformer patch embeddings -> CNN feature maps + by interpolate operation + """ + + def __init__(self, inplanes, outplanes, up_stride, act_layer=nn.ReLU, + norm_layer=partial(nn.BatchNorm2d, eps=1e-6), ): + super(FCUUp, self).__init__() + + self.up_stride = up_stride + self.conv_project = nn.Conv2d(inplanes, outplanes, kernel_size=1, stride=1, padding=0) + self.bn = norm_layer(outplanes) + self.act = act_layer() + + def forward(self, x, H, 
W):  # interpolate tokens back to the feature-map size
+        B, _, C = x.shape
+
+        # [N, 197, 384] -> [N, 196, 384] -> [N, 384, 196] -> [N, 384, 14, 14]
+        x_r = x[:, 1:].transpose(1, 2).reshape(B, C, H, W)  # drop cls token of x_t
+
+        x_r = self.act(self.bn(self.conv_project(x_r)))
+
+        return F.interpolate(x_r, size=(H * self.up_stride, W * self.up_stride))  # interpolate operation
+
+
+class Med_ConvBlock(nn.Module):  # ResNet bottleneck identity block, actually
+    """ special case of ConvBlock without down-sampling
+    """
+
+    def __init__(self, inplanes, act_layer=nn.ReLU, groups=1, norm_layer=partial(nn.BatchNorm2d, eps=1e-6),
+                 drop_block=None, drop_path=None):
+
+        super(Med_ConvBlock, self).__init__()
+
+        expansion = 4
+        med_planes = inplanes // expansion
+
+        self.conv1 = nn.Conv2d(inplanes, med_planes, kernel_size=1, stride=1, padding=0, bias=False)
+        self.bn1 = norm_layer(med_planes)
+        self.act1 = act_layer(inplace=True)
+
+        self.conv2 = nn.Conv2d(med_planes, med_planes, kernel_size=3, stride=1, groups=groups, padding=1, bias=False)
+        self.bn2 = norm_layer(med_planes)
+        self.act2 = act_layer(inplace=True)
+
+        self.conv3 = nn.Conv2d(med_planes, inplanes, kernel_size=1, stride=1, padding=0, bias=False)
+        self.bn3 = norm_layer(inplanes)
+        self.act3 = act_layer(inplace=True)
+
+        self.drop_block = drop_block
+        self.drop_path = drop_path
+
+    def zero_init_last_bn(self):
+        nn.init.zeros_(self.bn3.weight)
+
+    def forward(self, x):
+        residual = x
+
+        x = self.conv1(x)
+        x = self.bn1(x)
+        if self.drop_block is not None:
+            x = self.drop_block(x)
+        x = self.act1(x)
+
+        x = self.conv2(x)
+        x = self.bn2(x)
+        if self.drop_block is not None:
+            x = self.drop_block(x)
+        x = self.act2(x)
+
+        x = self.conv3(x)
+        x = self.bn3(x)
+        if self.drop_block is not None:
+            x = self.drop_block(x)
+
+        if self.drop_path is not None:
+            x = self.drop_path(x)
+
+        x += residual
+        x = self.act3(x)
+
+        return x
+
+
+class ConvTransBlock(nn.Module):
+    """
+    Basic module for ConvTransformer: keeps feature maps for the CNN block and patch embeddings for the transformer encoder block
+    """
+
+    def __init__(self, inplanes, outplanes, res_conv, stride, dw_stride, embed_dim, num_heads=12, mlp_ratio=4.,
+                 qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.,
+                 last_fusion=False, num_med_block=0, groups=1):
+
+        super(ConvTransBlock, self).__init__()
+        expansion = 4
+        # ConvBlock
+        self.cnn_block = ConvBlock(inplanes=inplanes, outplanes=outplanes, res_conv=res_conv, stride=stride,
+                                   groups=groups)
+
+        if last_fusion:
+            self.fusion_block = ConvBlock(inplanes=outplanes, outplanes=outplanes, stride=2, res_conv=True,
+                                          groups=groups)
+        else:
+            self.fusion_block = ConvBlock(inplanes=outplanes, outplanes=outplanes, groups=groups)
+
+        # identity block
+        if num_med_block > 0:
+            self.med_block = []
+            for i in range(num_med_block):
+                self.med_block.append(Med_ConvBlock(inplanes=outplanes, groups=groups))
+
+            self.med_block = nn.ModuleList(self.med_block)  # nn.ModuleList
+
+        # FCU
+        self.squeeze_block = FCUDown(inplanes=outplanes // expansion, outplanes=embed_dim, dw_stride=dw_stride)
+
+        self.expand_block = FCUUp(inplanes=embed_dim, outplanes=outplanes // expansion, up_stride=dw_stride)
+
+        # Transformer Encoder block
+        self.trans_block = Block(
+            dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
+            drop=drop_rate, attn_drop=attn_drop_rate, drop_path=drop_path_rate)
+
+        self.dw_stride = dw_stride
+        self.embed_dim = embed_dim
+        self.num_med_block = num_med_block
+        self.last_fusion = last_fusion
+
+    def forward(self, x, x_t):
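+        # data flow of one ConvTransBlock:
+        #   x   : CNN feature map [N, C, H, W];  x_t : transformer tokens (with cls token)
+        # 1) cnn_block updates x and exposes its mid feature map x2
+        # 2) squeeze_block (FCUDown) turns x2 into patch tokens, added onto x_t
+        # 3) trans_block (a ViT encoder block) updates the token sequence x_t
+        # 4) expand_block (FCUUp) maps x_t back to a feature map, fused into x by fusion_block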
+ x, x2 = self.cnn_block(x) + + _, _, H, W = x2.shape + + x_st = self.squeeze_block(x2, x_t) + + x_t = self.trans_block(x_st + x_t) + + if self.num_med_block > 0: + for m in self.med_block: + x = m(x) + + x_t_r = self.expand_block(x_t, H // self.dw_stride, W // self.dw_stride) + x = self.fusion_block(x, x_t_r, return_x_2=False) + + return x, x_t + + +class Conformer(nn.Module): + + def __init__(self, patch_size=16, in_chans=3, num_classes=1000, base_channel=64, channel_ratio=4, num_med_block=0, + embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0.): + + # Transformer + super().__init__() + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + assert depth % 3 == 0 + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.trans_dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + + # Classifier head + self.trans_norm = nn.LayerNorm(embed_dim) + self.trans_cls_head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + self.pooling = nn.AdaptiveAvgPool2d(1) + self.conv_cls_head = nn.Linear(int(256 * channel_ratio), num_classes) + self.cls_head = nn.Linear(int(2 * num_classes), num_classes) + + # Stem stage: get the feature maps by conv block (copied form resnet.py) + self.conv1 = nn.Conv2d(in_chans, 64, kernel_size=7, stride=2, padding=3, bias=False) # 1 / 2 [112, 112] + self.bn1 = nn.BatchNorm2d(64) + self.act1 = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) # 1 / 4 [56, 56] + + # 1 stage + stage_1_channel = int(base_channel * channel_ratio) + trans_dw_stride = patch_size // 4 + self.conv_1 = ConvBlock(inplanes=64, outplanes=stage_1_channel, res_conv=True, stride=1) + # embedding + self.trans_patch_conv = nn.Conv2d(64, embed_dim, kernel_size=trans_dw_stride, stride=trans_dw_stride, padding=0) + self.trans_1 = Block(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, + qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=self.trans_dpr[0], + ) + + # 2~4 stage + init_stage = 2 + fin_stage = depth // 3 + 1 + for i in range(init_stage, fin_stage): + self.add_module('conv_trans_' + str(i), + ConvTransBlock( + stage_1_channel, stage_1_channel, False, 1, dw_stride=trans_dw_stride, + embed_dim=embed_dim, + num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, + drop_path_rate=self.trans_dpr[i - 1], + num_med_block=num_med_block + ) + ) + + stage_2_channel = int(base_channel * channel_ratio * 2) + # 5~8 stage + init_stage = fin_stage # 5 + fin_stage = fin_stage + depth // 3 # 9 + for i in range(init_stage, fin_stage): + s = 2 if i == init_stage else 1 + in_channel = stage_1_channel if i == init_stage else stage_2_channel + res_conv = True if i == init_stage else False + self.add_module('conv_trans_' + str(i), + ConvTransBlock( + in_channel, stage_2_channel, res_conv, s, dw_stride=trans_dw_stride // 2, + embed_dim=embed_dim, + num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, + drop_path_rate=self.trans_dpr[i - 1], + num_med_block=num_med_block + ) + ) + + stage_3_channel = int(base_channel * channel_ratio * 2 * 2) + # 9~12 stage + init_stage = fin_stage # 9 + fin_stage = fin_stage + depth // 3 # 13 + for i in 
range(init_stage, fin_stage): + s = 2 if i == init_stage else 1 + in_channel = stage_2_channel if i == init_stage else stage_3_channel + res_conv = True if i == init_stage else False + last_fusion = True if i == depth else False + self.add_module('conv_trans_' + str(i), + ConvTransBlock( + in_channel, stage_3_channel, res_conv, s, dw_stride=trans_dw_stride // 4, + embed_dim=embed_dim, + num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, + drop_path_rate=self.trans_dpr[i - 1], + num_med_block=num_med_block, last_fusion=last_fusion + ) + ) + self.fin_stage = fin_stage + + trunc_normal_(self.cls_token, std=.02) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1.) + nn.init.constant_(m.bias, 0.) + elif isinstance(m, nn.GroupNorm): + nn.init.constant_(m.weight, 1.) + nn.init.constant_(m.bias, 0.) + + @torch.jit.ignore + def no_weight_decay(self): + return {'cls_token'} + + def forward(self, x): + B = x.shape[0] + cls_tokens = self.cls_token.expand(B, -1, -1) + + # pdb.set_trace() + # stem stage [N, 3, 224, 224] -> [N, 64, 56, 56] + x_base = self.maxpool(self.act1(self.bn1(self.conv1(x)))) + + # 1 stage + x = self.conv_1(x_base, return_x_2=False) + # embedding: [N, 64, 56, 56] -> [N, d, p, p] -> [N, d, p^2] -> [N, p^2, d] -> [N, p^2 + 1, d] + x_t = self.trans_patch_conv(x_base).flatten(2).transpose(1, 2) + x_t = torch.cat([cls_tokens, x_t], dim=1) + x_t = self.trans_1(x_t) + + # 2 ~ final + for i in range(2, self.fin_stage): + x, x_t = eval('self.conv_trans_' + str(i))(x, x_t) + + # conv classification + x_p = self.pooling(x).flatten(1) + conv_cls = self.conv_cls_head(x_p) + + # trans classification + x_t = self.trans_norm(x_t) + tran_cls = self.trans_cls_head(x_t[:, 0]) + + # 加一个类别投票 + cls = torch.cat([conv_cls, tran_cls], dim=1) + cls = self.cls_head(cls) + return cls + + # return [conv_cls, tran_cls] diff --git a/PuzzleTuning/Backbone/counterpart_models/crossformer.py b/PuzzleTuning/Backbone/counterpart_models/crossformer.py new file mode 100644 index 0000000000000000000000000000000000000000..80f0784a75501b25202538bd67da0b2e24f6c7ee --- /dev/null +++ b/PuzzleTuning/Backbone/counterpart_models/crossformer.py @@ -0,0 +1,624 @@ +import torch +import torch.nn as nn +import torch.utils.checkpoint as checkpoint +from timm.models.layers import DropPath, to_2tuple, trunc_normal_ + + +class Mlp(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + +class DynamicPosBias(nn.Module): + def __init__(self, dim, num_heads, residual): + super().__init__() + self.residual = residual + self.num_heads = num_heads + self.pos_dim = 
dim // 4 + self.pos_proj = nn.Linear(2, self.pos_dim) + self.pos1 = nn.Sequential( + nn.LayerNorm(self.pos_dim), + nn.ReLU(inplace=True), + nn.Linear(self.pos_dim, self.pos_dim), + ) + self.pos2 = nn.Sequential( + nn.LayerNorm(self.pos_dim), + nn.ReLU(inplace=True), + nn.Linear(self.pos_dim, self.pos_dim) + ) + self.pos3 = nn.Sequential( + nn.LayerNorm(self.pos_dim), + nn.ReLU(inplace=True), + nn.Linear(self.pos_dim, self.num_heads) + ) + def forward(self, biases): + if self.residual: + pos = self.pos_proj(biases) # 2Wh-1 * 2Ww-1, heads + pos = pos + self.pos1(pos) + pos = pos + self.pos2(pos) + pos = self.pos3(pos) + else: + pos = self.pos3(self.pos2(self.pos1(self.pos_proj(biases)))) + return pos + + def flops(self, N): + flops = N * 2 * self.pos_dim + flops += N * self.pos_dim * self.pos_dim + flops += N * self.pos_dim * self.pos_dim + flops += N * self.pos_dim * self.num_heads + return flops + +class Attention(nn.Module): + r""" Multi-head self attention module with dynamic position bias. + + Args: + dim (int): Number of input channels. + group_size (tuple[int]): The height and width of the group. + num_heads (int): Number of attention heads. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set + attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 + proj_drop (float, optional): Dropout ratio of output. Default: 0.0 + """ + + def __init__(self, dim, group_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0., + position_bias=True): + + super().__init__() + self.dim = dim + self.group_size = group_size # Wh, Ww + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim ** -0.5 + self.position_bias = position_bias + + if position_bias: + self.pos = DynamicPosBias(self.dim // 4, self.num_heads, residual=False) + + # generate mother-set + position_bias_h = torch.arange(1 - self.group_size[0], self.group_size[0]) + position_bias_w = torch.arange(1 - self.group_size[1], self.group_size[1]) + biases = torch.stack(torch.meshgrid([position_bias_h, position_bias_w])) # 2, 2Wh-1, 2W2-1 + biases = biases.flatten(1).transpose(0, 1).float() + self.register_buffer("biases", biases) + + # get pair-wise relative position index for each token inside the group + coords_h = torch.arange(self.group_size[0]) + coords_w = torch.arange(self.group_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += self.group_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += self.group_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.group_size[1] - 1 + relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + self.register_buffer("relative_position_index", relative_position_index) + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x, mask=None): + """ + Args: + x: input features with shape of (num_groups*B, N, C) + mask: (0/-inf) mask with shape of (num_groups, Wh*Ww, Wh*Ww) or None + """ + B_, N, C = x.shape + qkv = 
self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + if self.position_bias: + pos = self.pos(self.biases) # 2Wh-1 * 2Ww-1, heads + # select position bias + relative_position_bias = pos[self.relative_position_index.view(-1)].view( + self.group_size[0] * self.group_size[1], self.group_size[0] * self.group_size[1], -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + def extra_repr(self) -> str: + return f'dim={self.dim}, group_size={self.group_size}, num_heads={self.num_heads}' + + def flops(self, N): + # calculate flops for 1 group with token length of N + flops = 0 + # qkv = self.qkv(x) + flops += N * self.dim * 3 * self.dim + # attn = (q @ k.transpose(-2, -1)) + flops += self.num_heads * N * (self.dim // self.num_heads) * N + # x = (attn @ v) + flops += self.num_heads * N * N * (self.dim // self.num_heads) + # x = self.proj(x) + flops += N * self.dim * self.dim + if self.position_bias: + flops += self.pos.flops(N) + return flops + + +class CrossFormerBlock(nn.Module): + r""" CrossFormer Block. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resulotion. + num_heads (int): Number of attention heads. + group_size (int): Group size. + lsda_flag (int): use SDA or LDA, 0 for SDA and 1 for LDA. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float, optional): Stochastic depth rate. Default: 0.0 + act_layer (nn.Module, optional): Activation layer. Default: nn.GELU + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__(self, dim, input_resolution, num_heads, group_size=7, lsda_flag=0, + mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0., + act_layer=nn.GELU, norm_layer=nn.LayerNorm, num_patch_size=1): + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.num_heads = num_heads + self.group_size = group_size + self.lsda_flag = lsda_flag + self.mlp_ratio = mlp_ratio + self.num_patch_size = num_patch_size + if min(self.input_resolution) <= self.group_size: + # if group size is larger than input resolution, we don't partition groups + self.lsda_flag = 0 + self.group_size = min(self.input_resolution) + + self.norm1 = norm_layer(dim) + + self.attn = Attention( + dim, group_size=to_2tuple(self.group_size), num_heads=num_heads, + qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, + position_bias=True) + + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + attn_mask = None + self.register_buffer("attn_mask", attn_mask) + + def forward(self, x): + H, W = self.input_resolution + B, L, C = x.shape + assert L == H * W, "input feature has wrong size %d, %d, %d" % (L, H, W) + + shortcut = x + x = self.norm1(x) + x = x.view(B, H, W, C) + + # group embeddings + G = self.group_size + if self.lsda_flag == 0: # 0 for SDA + x = x.reshape(B, H // G, G, W // G, G, C).permute(0, 1, 3, 2, 4, 5) + else: # 1 for LDA + x = x.reshape(B, G, H // G, G, W // G, C).permute(0, 2, 4, 1, 3, 5) + x = x.reshape(B * H * W // G**2, G**2, C) + + # multi-head self-attention + x = self.attn(x, mask=self.attn_mask) # nW*B, G*G, C + + # ungroup embeddings + x = x.reshape(B, H // G, W // G, G, G, C) + if self.lsda_flag == 0: + x = x.permute(0, 1, 3, 2, 4, 5).reshape(B, H, W, C) + else: + x = x.permute(0, 3, 1, 4, 2, 5).reshape(B, H, W, C) + x = x.view(B, H * W, C) + + # FFN + x = shortcut + self.drop_path(x) + x = x + self.drop_path(self.mlp(self.norm2(x))) + + return x + + def extra_repr(self) -> str: + return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \ + f"group_size={self.group_size}, lsda_flag={self.lsda_flag}, mlp_ratio={self.mlp_ratio}" + + def flops(self): + flops = 0 + H, W = self.input_resolution + # norm1 + flops += self.dim * H * W + # LSDA + nW = H * W / self.group_size / self.group_size + flops += nW * self.attn.flops(self.group_size * self.group_size) + # mlp + flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio + # norm2 + flops += self.dim * H * W + return flops + +class PatchMerging(nn.Module): + r""" Patch Merging Layer. + + Args: + input_resolution (tuple[int]): Resolution of input feature. + dim (int): Number of input channels. + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm, patch_size=[2], num_input_patch_size=1): + super().__init__() + self.input_resolution = input_resolution + self.dim = dim + self.reductions = nn.ModuleList() + self.patch_size = patch_size + self.norm = norm_layer(dim) + + for i, ps in enumerate(patch_size): + if i == len(patch_size) - 1: + out_dim = 2 * dim // 2 ** i + else: + out_dim = 2 * dim // 2 ** (i + 1) + stride = 2 + padding = (ps - stride) // 2 + self.reductions.append(nn.Conv2d(dim, out_dim, kernel_size=ps, + stride=stride, padding=padding)) + + def forward(self, x): + """ + x: B, H*W, C + """ + H, W = self.input_resolution + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even." + + x = self.norm(x) + x = x.view(B, H, W, C).permute(0, 3, 1, 2) + + xs = [] + for i in range(len(self.reductions)): + tmp_x = self.reductions[i](x).flatten(2).transpose(1, 2) + xs.append(tmp_x) + x = torch.cat(xs, dim=2) + return x + + def extra_repr(self) -> str: + return f"input_resolution={self.input_resolution}, dim={self.dim}" + + def flops(self): + H, W = self.input_resolution + flops = H * W * self.dim + for i, ps in enumerate(self.patch_size): + if i == len(self.patch_size) - 1: + out_dim = 2 * self.dim // 2 ** i + else: + out_dim = 2 * self.dim // 2 ** (i + 1) + flops += (H // 2) * (W // 2) * ps * ps * out_dim * self.dim + return flops + + +class Stage(nn.Module): + """ CrossFormer blocks for one stage. 
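+
+    Blocks alternate between short-distance attention (SDA, lsda_flag = 0 at even
+    block indices) and long-distance attention (LDA, lsda_flag = 1 at odd indices).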
+ + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resolution. + depth (int): Number of blocks. + num_heads (int): Number of attention heads. + group_size (int): variable G in the paper, one group has GxG embeddings + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. + """ + + def __init__(self, dim, input_resolution, depth, num_heads, group_size, + mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False, + patch_size_end=[4], num_patch_size=None): + + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.ModuleList() + for i in range(depth): + lsda_flag = 0 if (i % 2 == 0) else 1 + self.blocks.append(CrossFormerBlock(dim=dim, input_resolution=input_resolution, + num_heads=num_heads, group_size=group_size, + lsda_flag=lsda_flag, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop, attn_drop=attn_drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, + norm_layer=norm_layer, + num_patch_size=num_patch_size)) + + # patch merging layer + if downsample is not None: + self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer, + patch_size=patch_size_end, num_input_patch_size=num_patch_size) + else: + self.downsample = None + + def forward(self, x): + for blk in self.blocks: + if self.use_checkpoint: + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x) + if self.downsample is not None: + x = self.downsample(x) + return x + + def extra_repr(self) -> str: + return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}" + + def flops(self): + flops = 0 + for blk in self.blocks: + flops += blk.flops() + if self.downsample is not None: + flops += self.downsample.flops() + return flops + + +class PatchEmbed(nn.Module): + r""" Image to Patch Embedding + + Args: + img_size (int): Image size. Default: 224. + patch_size (int): Patch token size. Default: [4]. + in_chans (int): Number of input image channels. Default: 3. + embed_dim (int): Number of linear projection output channels. Default: 96. + norm_layer (nn.Module, optional): Normalization layer. 
Default: None + """ + + def __init__(self, img_size=224, patch_size=[4], in_chans=3, embed_dim=96, norm_layer=None): + super().__init__() + img_size = to_2tuple(img_size) + # patch_size = to_2tuple(patch_size) + patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[0]] + self.img_size = img_size + self.patch_size = patch_size + self.patches_resolution = patches_resolution + self.num_patches = patches_resolution[0] * patches_resolution[1] + + self.in_chans = in_chans + self.embed_dim = embed_dim + + self.projs = nn.ModuleList() + for i, ps in enumerate(patch_size): + if i == len(patch_size) - 1: + dim = embed_dim // 2 ** i + else: + dim = embed_dim // 2 ** (i + 1) + stride = patch_size[0] + padding = (ps - patch_size[0]) // 2 + self.projs.append(nn.Conv2d(in_chans, dim, kernel_size=ps, stride=stride, padding=padding)) + if norm_layer is not None: + self.norm = norm_layer(embed_dim) + else: + self.norm = None + + def forward(self, x): + B, C, H, W = x.shape + # FIXME look at relaxing size constraints + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." + xs = [] + for i in range(len(self.projs)): + tx = self.projs[i](x).flatten(2).transpose(1, 2) + xs.append(tx) # B Ph*Pw C + x = torch.cat(xs, dim=2) + if self.norm is not None: + x = self.norm(x) + return x + + def flops(self): + Ho, Wo = self.patches_resolution + flops = 0 + for i, ps in enumerate(self.patch_size): + if i == len(self.patch_size) - 1: + dim = self.embed_dim // 2 ** i + else: + dim = self.embed_dim // 2 ** (i + 1) + flops += Ho * Wo * dim * self.in_chans * (self.patch_size[i] * self.patch_size[i]) + if self.norm is not None: + flops += Ho * Wo * self.embed_dim + return flops + + +class CrossFormer(nn.Module): + r""" CrossFormer + A PyTorch impl of : `CrossFormer: A Versatile Vision Transformer Based on Cross-scale Attention` - + + Args: + img_size (int | tuple(int)): Input image size. Default 224 + patch_size (int | tuple(int)): Patch size. Default: 4 + in_chans (int): Number of input image channels. Default: 3 + num_classes (int): Number of classes for classification head. Default: 1000 + embed_dim (int): Patch embedding dimension. Default: 96 + depths (tuple(int)): Depth of each stage. + num_heads (tuple(int)): Number of attention heads in different layers. + group_size (int): Group size. Default: 7 + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4 + qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None + drop_rate (float): Dropout rate. Default: 0 + attn_drop_rate (float): Attention dropout rate. Default: 0 + drop_path_rate (float): Stochastic depth rate. Default: 0.1 + norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. + ape (bool): If True, add absolute position embedding to the patch embedding. Default: False + patch_norm (bool): If True, add normalization after patch embedding. Default: True + use_checkpoint (bool): Whether to use checkpointing to save memory. 
Default: False + """ + + def __init__(self, img_size=224, patch_size=[4], in_chans=3, num_classes=1000, + embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], + group_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, + norm_layer=nn.LayerNorm, ape=False, patch_norm=True, + use_checkpoint=False, merge_size=[[2], [2], [2]], **kwargs): + super().__init__() + + self.num_classes = num_classes + self.num_layers = len(depths) + self.embed_dim = embed_dim + self.ape = ape + self.patch_norm = patch_norm + self.num_features = int(embed_dim * 2 ** (self.num_layers - 1)) + self.mlp_ratio = mlp_ratio + + # split image into non-overlapping patches + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, + norm_layer=norm_layer if self.patch_norm else None) + num_patches = self.patch_embed.num_patches + patches_resolution = self.patch_embed.patches_resolution + self.patches_resolution = patches_resolution + + # absolute position embedding + if self.ape: + self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) + trunc_normal_(self.absolute_pos_embed, std=.02) + + self.pos_drop = nn.Dropout(p=drop_rate) + + # stochastic depth + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule + + # build layers + self.layers = nn.ModuleList() + + num_patch_sizes = [len(patch_size)] + [len(m) for m in merge_size] + for i_layer in range(self.num_layers): + patch_size_end = merge_size[i_layer] if i_layer < self.num_layers - 1 else None + num_patch_size = num_patch_sizes[i_layer] + layer = Stage(dim=int(embed_dim * 2 ** i_layer), + input_resolution=(patches_resolution[0] // (2 ** i_layer), + patches_resolution[1] // (2 ** i_layer)), + depth=depths[i_layer], + num_heads=num_heads[i_layer], + group_size=group_size[i_layer], + mlp_ratio=self.mlp_ratio, + qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, + drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], + norm_layer=norm_layer, + downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, + use_checkpoint=use_checkpoint, + patch_size_end=patch_size_end, + num_patch_size=num_patch_size) + self.layers.append(layer) + + self.norm = norm_layer(self.num_features) + self.avgpool = nn.AdaptiveAvgPool1d(1) + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'absolute_pos_embed'} + + @torch.jit.ignore + def no_weight_decay_keywords(self): + return {'relative_position_bias_table'} + + def forward_features(self, x): + x = self.patch_embed(x) + if self.ape: + x = x + self.absolute_pos_embed + x = self.pos_drop(x) + + for layer in self.layers: + x = layer(x) + + x = self.norm(x) # B L C + x = self.avgpool(x.transpose(1, 2)) # B C 1 + x = torch.flatten(x, 1) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + def flops(self): + flops = 0 + flops += self.patch_embed.flops() + for i, layer in enumerate(self.layers): + flops += layer.flops() + flops += self.num_features * 
self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers) + flops += self.num_features * self.num_classes + return flops + + +class cross_former_cls_head_warp(nn.Module): + def __init__(self, backbone, num_classes): + super().__init__() + embed_dim = 96 + depths = [2, 2, 18, 2] + num_layers = len(depths) + num_features = int(embed_dim * 2 ** (num_layers - 1)) + self.backbone = backbone + self.head = nn.Linear(num_features, num_classes) + + def forward(self, x): + x = self.backbone(x) + x = self.head(x) + return x \ No newline at end of file diff --git a/PuzzleTuning/Backbone/counterpart_models/crossformer_backbone.py b/PuzzleTuning/Backbone/counterpart_models/crossformer_backbone.py new file mode 100644 index 0000000000000000000000000000000000000000..a05ec04199153559410ed407504aa4259d9ea59c --- /dev/null +++ b/PuzzleTuning/Backbone/counterpart_models/crossformer_backbone.py @@ -0,0 +1,659 @@ +import math +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint +from timm.models.layers import DropPath, to_2tuple, trunc_normal_ +from mmdet.utils import get_root_logger +from mmcv.runner import load_checkpoint + +NEG_INF = -1000000 + +class Mlp(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class DynamicPosBias(nn.Module): + def __init__(self, dim, num_heads, residual): + super().__init__() + self.residual = residual + self.num_heads = num_heads + self.pos_dim = dim // 4 + self.pos_proj = nn.Linear(2, self.pos_dim) + self.pos1 = nn.Sequential( + nn.LayerNorm(self.pos_dim), + nn.ReLU(inplace=True), + nn.Linear(self.pos_dim, self.pos_dim), + ) + self.pos2 = nn.Sequential( + nn.LayerNorm(self.pos_dim), + nn.ReLU(inplace=True), + nn.Linear(self.pos_dim, self.pos_dim) + ) + self.pos3 = nn.Sequential( + nn.LayerNorm(self.pos_dim), + nn.ReLU(inplace=True), + nn.Linear(self.pos_dim, self.num_heads) + ) + def forward(self, biases): + if self.residual: + pos = self.pos_proj(biases) # 2Gh-1 * 2Gw-1, heads + pos = pos + self.pos1(pos) + pos = pos + self.pos2(pos) + pos = self.pos3(pos) + else: + pos = self.pos3(self.pos2(self.pos1(self.pos_proj(biases)))) + return pos + + def flops(self, N): + flops = N * 2 * self.pos_dim + flops += N * self.pos_dim * self.pos_dim + flops += N * self.pos_dim * self.pos_dim + flops += N * self.pos_dim * self.num_heads + return flops + +class Attention(nn.Module): + r""" Multi-head self attention module with relative position bias. + + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set + attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 + proj_drop (float, optional): Dropout ratio of output. 
Default: 0.0 + """ + + def __init__(self, dim, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0., + position_bias=True): + + super().__init__() + self.dim = dim + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim ** -0.5 + self.position_bias = position_bias + if self.position_bias: + self.pos = DynamicPosBias(self.dim // 4, self.num_heads, residual=False) + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x, H, W, mask=None): + """ + Args: + x: input features with shape of (num_windows*B, N, C) + mask: (0/-inf) mask with shape of (num_windows, Gh*Gw, Gh*Gw) or None + """ + group_size = (H, W) + B_, N, C = x.shape + assert H*W == N + qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4).contiguous() + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) # (num_windows*B, N, N), N = Gh*Gw + + if self.position_bias: + # generate mother-set + position_bias_h = torch.arange(1 - group_size[0], group_size[0], device=attn.device) + position_bias_w = torch.arange(1 - group_size[1], group_size[1], device=attn.device) + biases = torch.stack(torch.meshgrid([position_bias_h, position_bias_w])) # 2, 2Gh-1, 2Gw-1 + biases = biases.flatten(1).transpose(0, 1).contiguous().float() + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(group_size[0], device=attn.device) + coords_w = torch.arange(group_size[1], device=attn.device) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Gh, Gw + coords_flatten = torch.flatten(coords, 1) # 2, Gh*Gw + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Gh*Gw, Gh*Gw + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Gh*Gw, Gh*Gw, 2 + relative_coords[:, :, 0] += group_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += group_size[1] - 1 + relative_coords[:, :, 0] *= 2 * group_size[1] - 1 + relative_position_index = relative_coords.sum(-1) # Gh*Gw, Gh*Gw + + pos = self.pos(biases) # 2Gh-1 * 2Gw-1, heads + # select position bias + relative_position_bias = pos[relative_position_index.view(-1)].view( + group_size[0] * group_size[1], group_size[0] * group_size[1], -1) # Gh*Gw,Gh*Gw,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Gh*Gw, Gh*Gw + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nG = mask.shape[0] + attn = attn.view(B_ // nG, nG, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) # (B, nG, nHead, N, N) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + def extra_repr(self) -> str: + return f'dim={self.dim}, num_heads={self.num_heads}' + + def flops(self, N): + # calculate flops for 1 window with token length of N + flops = 0 + excluded_flops = 0 + # qkv = self.qkv(x) + flops += N * self.dim * 3 * self.dim + # attn = (q @ k.transpose(-2, -1)) + flops += self.num_heads * N * (self.dim // self.num_heads) * N + excluded_flops += self.num_heads * N * (self.dim // self.num_heads) * N + # x = (attn @ v) + flops += 
self.num_heads * N * N * (self.dim // self.num_heads) + excluded_flops += self.num_heads * N * N * (self.dim // self.num_heads) + # x = self.proj(x) + flops += N * self.dim * self.dim + if self.position_bias: + flops += self.pos.flops(N) + return flops, excluded_flops + + +class CrossFormerBlock(nn.Module): + r""" CrossFormer Block. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resolution. + num_heads (int): Number of attention heads. + group_size (int): Group size. + interval (int): Sampling interval for LDA. Default: 8 + lsda_flag (int): use SDA or LDA, 0 for SDA and 1 for LDA. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float, optional): Stochastic depth rate. Default: 0.0 + act_layer (nn.Module, optional): Activation layer. Default: nn.GELU + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__(self, dim, input_resolution, num_heads, group_size=7, interval=8, lsda_flag=0, + mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0., + act_layer=nn.GELU, norm_layer=nn.LayerNorm, num_patch_size=1): + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.num_heads = num_heads + self.group_size = group_size + self.interval = interval + self.lsda_flag = lsda_flag + self.mlp_ratio = mlp_ratio + self.num_patch_size = num_patch_size + + self.norm1 = norm_layer(dim) + + self.attn = Attention( + dim, num_heads=num_heads, + qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, + position_bias=True) + + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x, H, W): + B, L, C = x.shape + assert L == H * W, "input feature has wrong size %d, %d, %d" % (L, H, W) + + if min(H, W) <= self.group_size: + # if window size is larger than input resolution, we don't partition windows + self.lsda_flag = 0 + self.group_size = min(H, W) + + shortcut = x + x = self.norm1(x) + x = x.view(B, H, W, C) + + # padding + size_div = self.interval if self.lsda_flag == 1 else self.group_size + pad_l = pad_t = 0 + pad_r = (size_div - W % size_div) % size_div + pad_b = (size_div - H % size_div) % size_div + x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) + _, Hp, Wp, _ = x.shape + + mask = torch.zeros((1, Hp, Wp, 1), device=x.device) + if pad_b > 0: + mask[:, -pad_b:, :, :] = -1 + if pad_r > 0: + mask[:, :, -pad_r:, :] = -1 + + # group embeddings and generate attn_mask + if self.lsda_flag == 0: # SDA + G = Gh = Gw = self.group_size + x = x.reshape(B, Hp // G, G, Wp // G, G, C).permute(0, 1, 3, 2, 4, 5).contiguous() + x = x.reshape(B * Hp * Wp // G**2, G**2, C) + nG = Hp * Wp // G**2 + # attn_mask + if pad_r > 0 or pad_b > 0: + mask = mask.reshape(1, Hp // G, G, Wp // G, G, 1).permute(0, 1, 3, 2, 4, 5).contiguous() + mask = mask.reshape(nG, 1, G * G) + attn_mask = torch.zeros((nG, G * G, G * G), device=x.device) + attn_mask = attn_mask.masked_fill(mask < 0, NEG_INF) + else: + attn_mask = None + else: # LDA + I, Gh, Gw = self.interval, Hp // self.interval, Wp // self.interval + x = x.reshape(B, Gh, I, Gw, I, C).permute(0, 2, 4, 1, 3, 5).contiguous() + x = x.reshape(B * I * I, Gh * Gw, C) + nG = I ** 2 + # attn_mask + if pad_r > 0 or pad_b > 0: + mask = mask.reshape(1, Gh, I, Gw, I, 1).permute(0, 2, 4, 1, 3, 5).contiguous() + mask = mask.reshape(nG, 1, Gh * Gw) + attn_mask = torch.zeros((nG, Gh * Gw, Gh * Gw), device=x.device) + attn_mask = attn_mask.masked_fill(mask < 0, NEG_INF) + else: + attn_mask = None + + # multi-head self-attention + x = self.attn(x, Gh, Gw, mask=attn_mask) # nG*B, G*G, C + + # ungroup embeddings + if self.lsda_flag == 0: + x = x.reshape(B, Hp // G, Wp // G, G, G, C).permute(0, 1, 3, 2, 4, 5).contiguous() # B, Hp//G, G, Wp//G, G, C + else: + x = x.reshape(B, I, I, Gh, Gw, C).permute(0, 3, 1, 4, 2, 5).contiguous() # B, Gh, I, Gw, I, C + x = x.reshape(B, Hp, Wp, C) + + # remove padding + if pad_r > 0 or pad_b > 0: + x = x[:, :H, :W, :].contiguous() + x = x.view(B, H * W, C) + + # FFN + x = shortcut + self.drop_path(x) + x = x + self.drop_path(self.mlp(self.norm2(x))) + + return x + + def extra_repr(self) -> str: + return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \ + f"group_size={self.group_size}, lsda_flag={self.lsda_flag}, mlp_ratio={self.mlp_ratio}" + + def flops(self): + flops = 0 + H, W = self.input_resolution + # norm1 + flops += self.dim * H * W + # Attention + size_div = self.interval if self.lsda_flag == 1 else self.group_size + Hp = math.ceil(H / size_div) * size_div + Wp = math.ceil(W / size_div) * size_div + Gh = Hp / size_div if self.lsda_flag == 1 else self.group_size + Gw = Wp / size_div if self.lsda_flag == 1 else self.group_size + nG = Hp * Wp / Gh / Gw + attn_flops, attn_excluded_flops = self.attn.flops(Gh * Gw) + flops += nG * attn_flops + excluded_flops = nG * attn_excluded_flops + # mlp + flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio + # norm2 + flops += self.dim 
* H * W + return flops, excluded_flops + +class PatchMerging(nn.Module): + r""" Patch Merging Layer. + + Args: + input_resolution (tuple[int]): Resolution of input feature. + dim (int): Number of input channels. + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm, patch_size=[2], num_input_patch_size=1): + super().__init__() + self.input_resolution = input_resolution + self.dim = dim + self.reductions = nn.ModuleList() + self.patch_size = patch_size + self.norm = norm_layer(dim) + + for i, ps in enumerate(patch_size): + if i == len(patch_size) - 1: + out_dim = 2 * dim // 2 ** i + else: + out_dim = 2 * dim // 2 ** (i + 1) + stride = 2 + padding = (ps - stride) // 2 + self.reductions.append(nn.Conv2d(dim, out_dim, kernel_size=ps, + stride=stride, padding=padding)) + + def forward(self, x, H, W): + """ + x: B, H*W, C + """ + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even." + + x = self.norm(x) + x = x.view(B, H, W, C).permute(0, 3, 1, 2).contiguous() + + xs = [] + for i in range(len(self.reductions)): + tmp_x = self.reductions[i](x).flatten(2).transpose(1, 2).contiguous() + xs.append(tmp_x) + x = torch.cat(xs, dim=2) + return x + + def extra_repr(self) -> str: + return f"input_resolution={self.input_resolution}, dim={self.dim}" + + def flops(self): + H, W = self.input_resolution + flops = H * W * self.dim + for i, ps in enumerate(self.patch_size): + if i == len(self.patch_size) - 1: + out_dim = 2 * self.dim // 2 ** i + else: + out_dim = 2 * self.dim // 2 ** (i + 1) + flops += (H // 2) * (W // 2) * ps * ps * out_dim * self.dim + return flops + + +class Stage(nn.Module): + """ CrossFormer blocks for one stage. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resolution. + depth (int): Number of blocks. + num_heads (int): Number of attention heads. + group_size (int): Group size. + interval (int): Sampling interval for LDA. Default: 8 + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. 
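+ + Illustrative usage (a sketch added for clarity, not part of the original code; shapes assume dim=96, depth=2, a 56x56 token grid and no downsample): + stage = Stage(dim=96, input_resolution=(56, 56), depth=2, num_heads=3, group_size=7, interval=8) + feat, x = stage(tokens, 56, 56) # tokens: (B, 3136, 96) -> feat: (B, 96, 56, 56); x stays (B, 3136, 96)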
+ """ + + def __init__(self, dim, input_resolution, depth, num_heads, group_size, interval, + mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False, + patch_size_end=[4], num_patch_size=None): + + super().__init__() + self.dim = dim + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.ModuleList() + for i in range(depth): + lsda_flag = 0 if (i % 2 == 0) else 1 + self.blocks.append(CrossFormerBlock(dim=dim, input_resolution=input_resolution, + num_heads=num_heads, group_size=group_size, interval=interval, + lsda_flag=lsda_flag, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop, attn_drop=attn_drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, + norm_layer=norm_layer, + num_patch_size=num_patch_size)) + + # patch merging layer + if downsample is not None: + self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer, + patch_size=patch_size_end, num_input_patch_size=num_patch_size) + else: + self.downsample = None + + def forward(self, x, H, W): + for blk in self.blocks: + if self.use_checkpoint: + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x, H, W) + + B, _, C = x.shape + feat = x.view(B, H, W, C).permute(0, 3, 1, 2).contiguous() + if self.downsample is not None: + x = self.downsample(x, H, W) + return feat, x + + def extra_repr(self) -> str: + return f"dim={self.dim}, depth={self.depth}" + + def flops(self): + flops = 0 + excluded_flops = 0 + for blk in self.blocks: + blk_flops, blk_excluded_flops = blk.flops() + flops += blk_flops + excluded_flops += blk_excluded_flops + if self.downsample is not None: + flops += self.downsample.flops() + return flops, excluded_flops + + +class PatchEmbed(nn.Module): + r""" Image to Patch Embedding + + Args: + img_size (int): Image size. Default: 224. + patch_size (int): Patch token size. Default: 4. + in_chans (int): Number of input image channels. Default: 3. + embed_dim (int): Number of linear projection output channels. Default: 96. + norm_layer (nn.Module, optional): Normalization layer. 
Default: None + """ + + def __init__(self, img_size=224, patch_size=[4], in_chans=3, embed_dim=96, norm_layer=None): + super().__init__() + img_size = to_2tuple(img_size) + # patch_size = to_2tuple(patch_size) + patches_resolution = [img_size[0] // 4, img_size[1] // 4] # only for flops calculation + self.img_size = img_size + self.patch_size = patch_size + self.patches_resolution = patches_resolution + + self.in_chans = in_chans + self.embed_dim = embed_dim + + self.projs = nn.ModuleList() + for i, ps in enumerate(patch_size): + if i == len(patch_size) - 1: + dim = embed_dim // 2 ** i + else: + dim = embed_dim // 2 ** (i + 1) + stride = 4 + padding = (ps - 4) // 2 + self.projs.append(nn.Conv2d(in_chans, dim, kernel_size=ps, stride=stride, padding=padding)) + if norm_layer is not None: + self.norm = norm_layer(embed_dim) + else: + self.norm = None + + def forward(self, x): + B, C, H, W = x.shape + xs = [] + for i in range(len(self.projs)): + tx = self.projs[i](x).flatten(2).transpose(1, 2) + xs.append(tx) # B Ph*Pw C + x = torch.cat(xs, dim=2) + if self.norm is not None: + x = self.norm(x) + return x, H, W + + def flops(self): + Ho, Wo = self.patches_resolution + flops = 0 + for i, ps in enumerate(self.patch_size): + if i == len(self.patch_size) - 1: + dim = self.embed_dim // 2 ** i + else: + dim = self.embed_dim // 2 ** (i + 1) + flops += Ho * Wo * dim * self.in_chans * (self.patch_size[i] * self.patch_size[i]) + if self.norm is not None: + flops += Ho * Wo * self.embed_dim + return flops + + +class CrossFormer(nn.Module): + r""" CrossFormer + A PyTorch impl of : `CrossFormer: A Versatile Vision Transformer Based on Cross-scale Attention` - + + Args: + img_size (int | tuple(int)): Input image size. Default 224 + patch_size (int | tuple(int)): Patch size. Default: 4 + in_chans (int): Number of input image channels. Default: 3 + num_classes (int): Number of classes for classification head. Default: 1000 + embed_dim (int): Patch embedding dimension. Default: 96 + depths (tuple(int)): Depth of each stage. + num_heads (tuple(int)): Number of attention heads in different layers. + group_size (int): Group size. Default: 7 + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4 + qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None + drop_rate (float): Dropout rate. Default: 0 + attn_drop_rate (float): Attention dropout rate. Default: 0 + drop_path_rate (float): Stochastic depth rate. Default: 0.1 + norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. + ape (bool): If True, add absolute position embedding to the patch embedding. Default: False + patch_norm (bool): If True, add normalization after patch embedding. Default: True + use_checkpoint (bool): Whether to use checkpointing to save memory. 
Default: False + """ + + def __init__(self, img_size=224, patch_size=[4], in_chans=3, num_classes=1000, + embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], + group_size=7, crs_interval=[8, 4, 2, 1], mlp_ratio=4., qkv_bias=True, qk_scale=None, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, + norm_layer=nn.LayerNorm, patch_norm=True, + use_checkpoint=False, merge_size=[[2], [2], [2]], **kwargs): + super().__init__() + + self.num_classes = num_classes + self.num_layers = len(depths) + self.embed_dim = embed_dim + self.patch_norm = patch_norm + self.num_features = int(embed_dim * 2 ** (self.num_layers - 1)) + self.mlp_ratio = mlp_ratio + + # split image into non-overlapping patches + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, + norm_layer=norm_layer if self.patch_norm else None) + patches_resolution = self.patch_embed.patches_resolution + self.patches_resolution = patches_resolution # [H//4, W//4] of original image size + + self.pos_drop = nn.Dropout(p=drop_rate) + + # stochastic depth + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule + + # build layers + self.layers = nn.ModuleList() + + num_patch_sizes = [len(patch_size)] + [len(m) for m in merge_size] + for i_layer in range(self.num_layers): + patch_size_end = merge_size[i_layer] if i_layer < self.num_layers - 1 else None + num_patch_size = num_patch_sizes[i_layer] + layer = Stage(dim=int(embed_dim * 2 ** i_layer), + input_resolution=(patches_resolution[0] // (2 ** i_layer), + patches_resolution[1] // (2 ** i_layer)), + depth=depths[i_layer], + num_heads=num_heads[i_layer], + group_size=group_size[i_layer], + interval=crs_interval[i_layer], + mlp_ratio=self.mlp_ratio, + qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, + drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], + norm_layer=norm_layer, + downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, + use_checkpoint=use_checkpoint, + patch_size_end=patch_size_end, + num_patch_size=num_patch_size) + self.layers.append(layer) + + # # classification + # self.norm = norm_layer(self.num_features) + # self.avgpool = nn.AdaptiveAvgPool1d(1) + # self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + self.apply(self._init_weights) + + def init_weights(self, pretrained=None): + if isinstance(pretrained, str): + logger = get_root_logger() + load_checkpoint(self, pretrained, map_location='cpu', strict=False, logger=logger) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'absolute_pos_embed'} + + @torch.jit.ignore + def no_weight_decay_keywords(self): + return {'relative_position_bias_table'} + + def forward(self, x): + x, H, W = self.patch_embed(x) + x = self.pos_drop(x) + + outs = [] + for i, layer in enumerate(self.layers): + feat, x = layer(x, H //4 //(2 ** i), W //4 //(2 ** i)) + outs.append(feat) + + # # classification + # x = self.norm(x) # B L C + # x = self.avgpool(x.transpose(1, 2)) # B C 1 + # x = torch.flatten(x, 1) + # x = self.head(x) + # return x + + return outs + + def flops(self): + flops = 0 + excluded_flops = 0 + flops += self.patch_embed.flops() + 
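# each Stage reports (flops, excluded_flops); excluded_flops separately tallies the attention q@k and attn@v matmul terms +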
for i, layer in enumerate(self.layers): + layer_flops, layer_excluded_flops = layer.flops() + flops += layer_flops + excluded_flops += layer_excluded_flops + # flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers) + # flops += self.num_features * self.num_classes + return flops, excluded_flops diff --git a/PuzzleTuning/Backbone/getmodel.py b/PuzzleTuning/Backbone/getmodel.py new file mode 100644 index 0000000000000000000000000000000000000000..3faf89ff69d89aba3604fa646743be9aa04e236f --- /dev/null +++ b/PuzzleTuning/Backbone/getmodel.py @@ -0,0 +1,392 @@ +""" +get model func Script ver: Dec 5th 14:20 +""" +import os +import sys +sys.path.append(os.path.realpath('.')) + +import torch +import torch.nn as nn +from torchvision import models +from Backbone import ResHybrid + + +# get model +def get_model(num_classes=1000, edge_size=224, model_idx=None, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, + pretrained_backbone=True, use_cls_token=True, use_pos_embedding=True, use_att_module='SimAM'): + """ + :param num_classes: number of classes required for your dataset + :param edge_size: the input edge size for the dataloader + :param model_idx: the model we are going to use, in the format of Model_size_other_info + + :param drop_rate: the dropout probability of the proposed models + :param attn_drop_rate: the dropout probability right after the MHSA or MHGA block in the proposed models + :param drop_path_rate: the probability of stochastic depth + + :param pretrained_backbone: whether the backbone CNN is initialized randomly or from its official pretrained weights + + :param use_cls_token: whether to use the class token + :param use_pos_embedding: whether to use the positional embedding + :param use_att_module: which attention module to use in the FGD Focus block + + :return: prepared model + """ + if model_idx[0:5] == 'ViT_h': + # Transfer learning for ViT + import timm + from pprint import pprint + model_names = timm.list_models('*vit*') + pprint(model_names) + if edge_size == 224: + model = timm.create_model('vit_huge_patch14_224_in21k', pretrained=pretrained_backbone, num_classes=num_classes) + else: + print('not an available image size with', model_idx) + + elif model_idx[0:5] == 'ViT_l': + # Transfer learning for ViT + import timm + from pprint import pprint + model_names = timm.list_models('*vit*') + pprint(model_names) + if edge_size == 224: + model = timm.create_model('vit_large_patch16_224', pretrained=pretrained_backbone, num_classes=num_classes) + elif edge_size == 384: + model = timm.create_model('vit_large_patch16_384', pretrained=pretrained_backbone, num_classes=num_classes) + else: + print('not an available image size with', model_idx) + + elif model_idx[0:5] == 'ViT_s': + # Transfer learning for ViT + import timm + from pprint import pprint + model_names = timm.list_models('*vit*') + pprint(model_names) + if edge_size == 224: + model = timm.create_model('vit_small_patch16_224', pretrained=pretrained_backbone, num_classes=num_classes) + elif edge_size == 384: + model = timm.create_model('vit_small_patch16_384', pretrained=pretrained_backbone, num_classes=num_classes) + else: + print('not an available image size with', model_idx) + + elif model_idx[0:5] == 'ViT_t': + # Transfer learning for ViT + import timm + from pprint import pprint + model_names = timm.list_models('*vit*') + pprint(model_names) + if edge_size == 224: + model = timm.create_model('vit_tiny_patch16_224', pretrained=pretrained_backbone, num_classes=num_classes) + elif 
edge_size == 384: + model = timm.create_model('vit_tiny_patch16_384', pretrained=pretrained_backbone, num_classes=num_classes) + else: + print('not an available image size with', model_idx) + + elif model_idx[0:5] == 'ViT_b' or model_idx[0:3] == 'ViT': # vit_base + # Transfer learning for ViT + import timm + from pprint import pprint + model_names = timm.list_models('*vit*') + pprint(model_names) + if edge_size == 224: + model = timm.create_model('vit_base_patch16_224', pretrained=pretrained_backbone, num_classes=num_classes) + elif edge_size == 384: + model = timm.create_model('vit_base_patch16_384', pretrained=pretrained_backbone, num_classes=num_classes) + else: + print('not an available image size with', model_idx) + + elif model_idx[0:3] == 'vgg': + # Transfer learning for vgg16_bn + import timm + from pprint import pprint + model_names = timm.list_models('*vgg*') + pprint(model_names) + if model_idx[0:8] == 'vgg16_bn': + model = timm.create_model('vgg16_bn', pretrained=pretrained_backbone, num_classes=num_classes) + elif model_idx[0:5] == 'vgg16': + model = timm.create_model('vgg16', pretrained=pretrained_backbone, num_classes=num_classes) + elif model_idx[0:8] == 'vgg19_bn': + model = timm.create_model('vgg19_bn', pretrained=pretrained_backbone, num_classes=num_classes) + elif model_idx[0:5] == 'vgg19': + model = timm.create_model('vgg19', pretrained=pretrained_backbone, num_classes=num_classes) + + elif model_idx[0:4] == 'deit': # Transfer learning for DeiT + import timm + from pprint import pprint + model_names = timm.list_models('*deit*') + pprint(model_names) + if edge_size == 384: + model = timm.create_model('deit_base_patch16_384', pretrained=pretrained_backbone, num_classes=num_classes) + elif edge_size == 224: + model = timm.create_model('deit_base_patch16_224', pretrained=pretrained_backbone, num_classes=num_classes) + else: + pass + + elif model_idx[0:5] == 'twins': # Transfer learning for twins + import timm + from pprint import pprint + + model_names = timm.list_models('*twins*') + pprint(model_names) + model = timm.create_model('twins_pcpvt_base', pretrained=pretrained_backbone, num_classes=num_classes) + + elif model_idx[0:5] == 'pit_b' and edge_size == 224: # Transfer learning for PiT + import timm + from pprint import pprint + + model_names = timm.list_models('*pit*') + pprint(model_names) + model = timm.create_model('pit_b_224', pretrained=pretrained_backbone, num_classes=num_classes) + + elif model_idx[0:5] == 'gcvit' and edge_size == 224: # Transfer learning for gcvit + import timm + from pprint import pprint + + model_names = timm.list_models('*gcvit*') + pprint(model_names) + model = timm.create_model('gcvit_base', pretrained=pretrained_backbone, num_classes=num_classes) + + elif model_idx[0:6] == 'xcit_s': # Transfer learning for XCiT + import timm + from pprint import pprint + model_names = timm.list_models('*xcit*') + pprint(model_names) + if edge_size == 384: + model = timm.create_model('xcit_small_12_p16_384_dist', pretrained=pretrained_backbone, + num_classes=num_classes) + elif edge_size == 224: + model = timm.create_model('xcit_small_12_p16_224_dist', pretrained=pretrained_backbone, + num_classes=num_classes) + else: + pass + + elif model_idx[0:6] == 'xcit_m': # Transfer learning for XCiT + import timm + from pprint import pprint + model_names = timm.list_models('*xcit*') + pprint(model_names) + if edge_size == 384: + model = timm.create_model('xcit_medium_24_p16_384_dist', pretrained=pretrained_backbone, + num_classes=num_classes) + elif edge_size == 224: + model = 
timm.create_model('xcit_medium_24_p16_224_dist', pretrained=pretrained_backbone, + num_classes=num_classes) + else: + pass + + elif model_idx[0:6] == 'mvitv2': # Transfer learning for MViT v2 small fixme bug in model! + import timm + from pprint import pprint + model_names = timm.list_models('*mvitv2*') + pprint(model_names) + model = timm.create_model('mvitv2_small_cls', pretrained=pretrained_backbone, num_classes=num_classes) + + elif model_idx[0:6] == 'convit' and edge_size == 224: # Transfer learning for ConViT fixme bug in model! + import timm + from pprint import pprint + + model_names = timm.list_models('*convit*') + pprint(model_names) + model = timm.create_model('convit_base', pretrained=pretrained_backbone, num_classes=num_classes) + + elif model_idx[0:6] == 'ResNet': # Transfer learning for the ResNets + if model_idx[0:8] == 'ResNet34': + model = models.resnet34(pretrained=pretrained_backbone) + elif model_idx[0:8] == 'ResNet50': + model = models.resnet50(pretrained=pretrained_backbone) + elif model_idx[0:9] == 'ResNet101': + model = models.resnet101(pretrained=pretrained_backbone) + else: + print('this model is not defined in get model') + return -1 + num_ftrs = model.fc.in_features + model.fc = nn.Linear(num_ftrs, num_classes) + + elif model_idx[0:6] == 'Backbone': # ours: MSHT + # NOTICE: here 'pretrained' only controls whether the backbone CNN is initialized randomly + # or from its official pretrained weights + model = ResHybrid.create_model(model_idx, edge_size, pretrained=pretrained_backbone, num_classes=num_classes, + drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, + drop_path_rate=drop_path_rate, use_cls_token=use_cls_token, + use_pos_embedding=use_pos_embedding, use_att_module=use_att_module) + + elif model_idx[0:7] == 'bot_256' and edge_size == 256: # Model: BoT + import timm + from pprint import pprint + model_names = timm.list_models('*bot*') + pprint(model_names) + # NOTICE: we found no pretrained weights for BoT in timm + # ['botnet26t_256', 'botnet50ts_256', 'eca_botnext26ts_256'] + model = timm.create_model('botnet26t_256', pretrained=pretrained_backbone, num_classes=num_classes) + + elif model_idx[0:8] == 'densenet': # Transfer learning for densenet + import timm + from pprint import pprint + + model_names = timm.list_models('*densenet*') + pprint(model_names) + model = timm.create_model('densenet121', pretrained=pretrained_backbone, num_classes=num_classes) + + elif model_idx[0:8] == 'xception': # Transfer learning for Xception + import timm + from pprint import pprint + model_names = timm.list_models('*xception*') + pprint(model_names) + model = timm.create_model('xception', pretrained=pretrained_backbone, num_classes=num_classes) + + elif model_idx[0:9] == 'pvt_v2_b0': # Transfer learning for PVT v2 (todo: not okay with torchsummary) + import timm + from pprint import pprint + model_names = timm.list_models('*pvt_v2*') + pprint(model_names) + model = timm.create_model('pvt_v2_b0', pretrained=pretrained_backbone, num_classes=num_classes) + + elif model_idx[0:9] == 'visformer' and edge_size == 224: # Transfer learning for Visformer + import timm + from pprint import pprint + model_names = timm.list_models('*visformer*') + pprint(model_names) + model = timm.create_model('visformer_small', pretrained=pretrained_backbone, num_classes=num_classes) + + elif model_idx[0:9] == 'conformer': # Transfer learning for Conformer base + from Backbone.counterpart_models import conformer + + embed_dim = 576 + channel_ratio = 6 + + if pretrained_backbone: + model = 
conformer.Conformer(num_classes=1000, patch_size=16, channel_ratio=channel_ratio, + embed_dim=embed_dim, depth=12, num_heads=9, mlp_ratio=4, qkv_bias=True) + # note: the following checkpoint path is relative + save_model_path = '../saved_models/Conformer_base_patch16.pth' # fixme model is downloaded at this path + # downloaded from official model state at https://github.com/pengzhiliang/Conformer + model.load_state_dict(torch.load(save_model_path), False) + + model.trans_cls_head = nn.Linear(embed_dim, num_classes) + model.conv_cls_head = nn.Linear(int(256 * channel_ratio), num_classes) + model.cls_head = nn.Linear(int(2 * num_classes), num_classes) + + else: + model = conformer.Conformer(num_classes=num_classes, patch_size=16, channel_ratio=channel_ratio, + embed_dim=embed_dim, depth=12, num_heads=9, mlp_ratio=4, qkv_bias=True) + + elif model_idx[0:9] == 'coat_mini' and edge_size == 224: # Transfer learning for coat_mini + import timm + from pprint import pprint + + model_names = timm.list_models('*coat*') + pprint(model_names) + model = timm.create_model('coat_mini', pretrained=pretrained_backbone, num_classes=num_classes) + + elif model_idx[0:10] == 'swin_b_384' and edge_size == 384: # Transfer learning for Swin Transformer (swin_b_384) + import timm + from pprint import pprint + model_names = timm.list_models('*swin*') + pprint(model_names) # swin_base_patch4_window12_384 swin_base_patch4_window12_384_in22k + model = timm.create_model('swin_base_patch4_window12_384', pretrained=pretrained_backbone, + num_classes=num_classes) + + elif model_idx[0:10] == 'swin_b_224' and edge_size == 224: # Transfer learning for Swin Transformer (swin_b_224) + import timm + from pprint import pprint + model_names = timm.list_models('*swin*') + pprint(model_names) # swin_base_patch4_window7_224 swin_base_patch4_window7_224_in22k + model = timm.create_model('swin_base_patch4_window7_224', pretrained=pretrained_backbone, + num_classes=num_classes) + + elif model_idx[0:11] == 'mobilenetv3': # Transfer learning for mobilenetv3 + import timm + from pprint import pprint + model_names = timm.list_models('*mobilenet*') + pprint(model_names) + model = timm.create_model('mobilenetv3_large_100', pretrained=pretrained_backbone, num_classes=num_classes) + + elif model_idx[0:11] == 'mobilevit_s': # Transfer learning for mobilevit_s + import timm + from pprint import pprint + model_names = timm.list_models('*mobilevit*') + pprint(model_names) + model = timm.create_model('mobilevit_s', pretrained=pretrained_backbone, num_classes=num_classes) + + elif model_idx[0:11] == 'inceptionv3': # Transfer learning for Inception v3 + import timm + from pprint import pprint + model_names = timm.list_models('*inception*') + pprint(model_names) + model = timm.create_model('inception_v3', pretrained=pretrained_backbone, num_classes=num_classes) + + elif model_idx[0:12] == 'cross_former' and edge_size == 224: # Transfer learning for crossformer base + from Backbone.counterpart_models import crossformer + backbone = crossformer.CrossFormer(img_size=edge_size, + patch_size=[4, 8, 16, 32], + in_chans=3, + num_classes=0, # get backbone only + embed_dim=96, + depths=[2, 2, 18, 2], + num_heads=[3, 6, 12, 24], + group_size=[7, 7, 7, 7], + mlp_ratio=4., + qkv_bias=True, + qk_scale=None, + drop_rate=0.0, + drop_path_rate=0.3, + ape=False, + patch_norm=True, + use_checkpoint=False, + merge_size=[[2, 4], [2, 4], [2, 4]], ) + if pretrained_backbone: + save_model_path = '../saved_models/crossformer-b.pth' # fixme model is downloaded at this path + # downloaded 
from official model state at https://github.com/cheerss/CrossFormer + backbone.load_state_dict(torch.load(save_model_path)['model'], False) + model = crossformer.cross_former_cls_head_warp(backbone, num_classes) + + elif model_idx[0:13] == 'crossvit_base': # Transfer learning for crossvit_base (todo: not okay with torchsummary) + import timm + from pprint import pprint + model_names = timm.list_models('*crossvit_base*') + pprint(model_names) + model = timm.create_model('crossvit_base_240', pretrained=pretrained_backbone, num_classes=num_classes) + + elif model_idx[0:14] == 'efficientnet_b': # Transfer learning for efficientnet_b3,4 + import timm + from pprint import pprint + model_names = timm.list_models('*efficientnet*') + pprint(model_names) + model = timm.create_model(model_idx[0:15], pretrained=pretrained_backbone, num_classes=num_classes) + + elif model_idx[0:14] == 'ResN50_ViT_384': # ResNet+ViT hybrid model, 384 input + import timm + from pprint import pprint + model_names = timm.list_models('*vit_base_resnet*') + pprint(model_names) + model = timm.create_model('vit_base_resnet50_384', pretrained=pretrained_backbone, num_classes=num_classes) + + elif model_idx[0:15] == 'coat_lite_small' and edge_size == 224: # Transfer learning for coat_lite_small + import timm + from pprint import pprint + + model_names = timm.list_models('*coat*') + pprint(model_names) + model = timm.create_model('coat_lite_small', pretrained=pretrained_backbone, num_classes=num_classes) + + elif model_idx[0:17] == 'efficientformer_l' and edge_size == 224: # Transfer learning for the efficientformer_l series + import timm + from pprint import pprint + model_names = timm.list_models('*efficientformer*') + pprint(model_names) + model = timm.create_model(model_idx[0:18], pretrained=pretrained_backbone, num_classes=num_classes) + + else: + print('\nThe model', model_idx, 'with the edge size of', edge_size) + print("is not defined in the script!!", '\n') + return -1 + + try: + img = torch.randn(1, 3, edge_size, edge_size) + preds = model(img) # (1, class_number) + print('test model output:', preds) + except: + print("Problem exists in the model defining process!!") + return -1 + else: + print('model is ready now!') + return model diff --git a/PuzzleTuning/Counterpart PreTrain Methods/ReadMe.md b/PuzzleTuning/Counterpart PreTrain Methods/ReadMe.md new file mode 100644 index 0000000000000000000000000000000000000000..9b0da00e2df00268598fe19e83413fbfc853fb48 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/ReadMe.md @@ -0,0 +1,2 @@ +We used MoCo v3, DINO, and MAE to pre-train the ViT-base-224 model. +The official code of each method is included here. diff --git a/PuzzleTuning/Counterpart PreTrain Methods/dino-main/LICENSE b/PuzzleTuning/Counterpart PreTrain Methods/dino-main/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..b09cd7856d58590578ee1a4f3ad45d1310a97f87 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/dino-main/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/dino-main/README.md b/PuzzleTuning/Counterpart PreTrain Methods/dino-main/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..50c8534217e768b68f9a834a95ca80a3ca2b83fc
--- /dev/null
+++ b/PuzzleTuning/Counterpart PreTrain Methods/dino-main/README.md
@@ -0,0 +1,14 @@
+# Self-Supervised Vision Transformers with DINO
+
+The original DINO repo can be found [here](https://github.com/facebookresearch/dino "DINO").
+
+Pip requirements: timm == 0.4.9, PyTorch == 1.7.1, Torchvision == 0.8.2, CUDA == 11.0
+
+A typical launch command:
+ ```console
+python -m torch.distributed.launch \
+--nproc_per_node=2 main_dino.py --arch vit_base --batch_size_per_gpu 128 \
+--lr 1.5e-4 --epochs 100 --data_path /root/autodl-tmp/All \
+--basic_state_dict /root/autodl-tmp/ViT_b16_224_Imagenet.pth \
+--num_workers 32 --output_dir the/path/of/CPIA
+ ```
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/dino-main/eval_copy_detection.py b/PuzzleTuning/Counterpart PreTrain Methods/dino-main/eval_copy_detection.py
new file mode 100644
index 0000000000000000000000000000000000000000..73dcd507893f204a47a5036cc61bd65b30cf1ead
--- /dev/null
+++ b/PuzzleTuning/Counterpart PreTrain Methods/dino-main/eval_copy_detection.py
@@ -0,0 +1,301 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
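+# ---------------------------------------------------------------------------
+# Editor's note -- an illustrative sketch, not part of the upstream DINO file:
+# score_ap_from_ranks_1() below integrates the precision-recall curve with
+# trapezoids. Assuming ranks=[0, 2] (true positives retrieved at ranks 0 and
+# 2) and nres=2 positives, the function computes:
+#   recall_step = 1/2
+#   rank 0: precision_0 = 1.0, precision_1 = 1/1 -> ap += (1.0 + 1.0)/2 * 0.5 = 0.5
+#   rank 2: precision_0 = 1/2, precision_1 = 2/3 -> ap += (0.5 + 0.6667)/2 * 0.5 ~ 0.2917
+# giving AP ~ 0.79 for this query.
+# ---------------------------------------------------------------------------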
+import os +import sys +import pickle +import argparse + +import torch +from torch import nn +import torch.distributed as dist +import torch.backends.cudnn as cudnn +from torchvision import models as torchvision_models +from torchvision import transforms as pth_transforms +from PIL import Image, ImageFile +import numpy as np + +import utils +import vision_transformer as vits +from eval_knn import extract_features + + +class CopydaysDataset(): + def __init__(self, basedir): + self.basedir = basedir + self.block_names = ( + ['original', 'strong'] + + ['jpegqual/%d' % i for i in + [3, 5, 8, 10, 15, 20, 30, 50, 75]] + + ['crops/%d' % i for i in + [10, 15, 20, 30, 40, 50, 60, 70, 80]]) + self.nblocks = len(self.block_names) + + self.query_blocks = range(self.nblocks) + self.q_block_sizes = np.ones(self.nblocks, dtype=int) * 157 + self.q_block_sizes[1] = 229 + # search only among originals + self.database_blocks = [0] + + def get_block(self, i): + dirname = self.basedir + '/' + self.block_names[i] + fnames = [dirname + '/' + fname + for fname in sorted(os.listdir(dirname)) + if fname.endswith('.jpg')] + return fnames + + def get_block_filenames(self, subdir_name): + dirname = self.basedir + '/' + subdir_name + return [fname + for fname in sorted(os.listdir(dirname)) + if fname.endswith('.jpg')] + + def eval_result(self, ids, distances): + j0 = 0 + for i in range(self.nblocks): + j1 = j0 + self.q_block_sizes[i] + block_name = self.block_names[i] + I = ids[j0:j1] # block size + sum_AP = 0 + if block_name != 'strong': + # 1:1 mapping of files to names + positives_per_query = [[i] for i in range(j1 - j0)] + else: + originals = self.get_block_filenames('original') + strongs = self.get_block_filenames('strong') + + # check if prefixes match + positives_per_query = [ + [j for j, bname in enumerate(originals) + if bname[:4] == qname[:4]] + for qname in strongs] + + for qno, Iline in enumerate(I): + positives = positives_per_query[qno] + ranks = [] + for rank, bno in enumerate(Iline): + if bno in positives: + ranks.append(rank) + sum_AP += score_ap_from_ranks_1(ranks, len(positives)) + + print("eval on %s mAP=%.3f" % ( + block_name, sum_AP / (j1 - j0))) + j0 = j1 + + +# from the Holidays evaluation package +def score_ap_from_ranks_1(ranks, nres): + """ Compute the average precision of one search. 
+ ranks = ordered list of ranks of true positives + nres = total number of positives in dataset + """ + + # accumulate trapezoids in PR-plot + ap = 0.0 + + # All have an x-size of: + recall_step = 1.0 / nres + + for ntp, rank in enumerate(ranks): + + # y-size on left side of trapezoid: + # ntp = nb of true positives so far + # rank = nb of retrieved items so far + if rank == 0: + precision_0 = 1.0 + else: + precision_0 = ntp / float(rank) + + # y-size on right side of trapezoid: + # ntp and rank are increased by one + precision_1 = (ntp + 1) / float(rank + 1) + + ap += (precision_1 + precision_0) * recall_step / 2.0 + + return ap + + +class ImgListDataset(torch.utils.data.Dataset): + def __init__(self, img_list, transform=None): + self.samples = img_list + self.transform = transform + + def __getitem__(self, i): + with open(self.samples[i], 'rb') as f: + img = Image.open(f) + img = img.convert('RGB') + if self.transform is not None: + img = self.transform(img) + return img, i + + def __len__(self): + return len(self.samples) + + +def is_image_file(s): + ext = s.split(".")[-1] + if ext in ['jpg', 'jpeg', 'png', 'ppm', 'bmp', 'pgm', 'tif', 'tiff', 'webp']: + return True + return False + + +@torch.no_grad() +def extract_features(image_list, model, args): + transform = pth_transforms.Compose([ + pth_transforms.Resize((args.imsize, args.imsize), interpolation=3), + pth_transforms.ToTensor(), + pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), + ]) + tempdataset = ImgListDataset(image_list, transform=transform) + data_loader = torch.utils.data.DataLoader(tempdataset, batch_size=args.batch_size_per_gpu, + num_workers=args.num_workers, drop_last=False, + sampler=torch.utils.data.DistributedSampler(tempdataset, shuffle=False)) + features = None + for samples, index in utils.MetricLogger(delimiter=" ").log_every(data_loader, 10): + samples, index = samples.cuda(non_blocking=True), index.cuda(non_blocking=True) + feats = model.get_intermediate_layers(samples, n=1)[0].clone() + + cls_output_token = feats[:, 0, :] # [CLS] token + # GeM with exponent 4 for output patch tokens + b, h, w, d = len(samples), int(samples.shape[-2] / model.patch_embed.patch_size), int(samples.shape[-1] / model.patch_embed.patch_size), feats.shape[-1] + feats = feats[:, 1:, :].reshape(b, h, w, d) + feats = feats.clamp(min=1e-6).permute(0, 3, 1, 2) + feats = nn.functional.avg_pool2d(feats.pow(4), (h, w)).pow(1. 
/ 4).reshape(b, -1) + # concatenate [CLS] token and GeM pooled patch tokens + feats = torch.cat((cls_output_token, feats), dim=1) + + # init storage feature matrix + if dist.get_rank() == 0 and features is None: + features = torch.zeros(len(data_loader.dataset), feats.shape[-1]) + if args.use_cuda: + features = features.cuda(non_blocking=True) + + # get indexes from all processes + y_all = torch.empty(dist.get_world_size(), index.size(0), dtype=index.dtype, device=index.device) + y_l = list(y_all.unbind(0)) + y_all_reduce = torch.distributed.all_gather(y_l, index, async_op=True) + y_all_reduce.wait() + index_all = torch.cat(y_l) + + # share features between processes + feats_all = torch.empty(dist.get_world_size(), feats.size(0), feats.size(1), + dtype=feats.dtype, device=feats.device) + output_l = list(feats_all.unbind(0)) + output_all_reduce = torch.distributed.all_gather(output_l, feats, async_op=True) + output_all_reduce.wait() + + # update storage feature matrix + if dist.get_rank() == 0: + if args.use_cuda: + features.index_copy_(0, index_all, torch.cat(output_l)) + else: + features.index_copy_(0, index_all.cpu(), torch.cat(output_l).cpu()) + return features # features is still None for every rank which is not 0 (main) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser('Copy detection on Copydays') + parser.add_argument('--data_path', default='/path/to/copydays/', type=str, + help="See https://lear.inrialpes.fr/~jegou/data.php#copydays") + parser.add_argument('--whitening_path', default='/path/to/whitening_data/', type=str, + help="""Path to directory with images used for computing the whitening operator. + In our paper, we use 20k random images from YFCC100M.""") + parser.add_argument('--distractors_path', default='/path/to/distractors/', type=str, + help="Path to directory with distractors images. In our paper, we use 10k random images from YFCC100M.") + parser.add_argument('--imsize', default=320, type=int, help='Image size (square image)') + parser.add_argument('--batch_size_per_gpu', default=16, type=int, help='Per-GPU batch-size') + parser.add_argument('--pretrained_weights', default='', type=str, help="Path to pretrained weights to evaluate.") + parser.add_argument('--use_cuda', default=True, type=utils.bool_flag) + parser.add_argument('--arch', default='vit_base', type=str, help='Architecture') + parser.add_argument('--patch_size', default=8, type=int, help='Patch resolution of the model.') + parser.add_argument("--checkpoint_key", default="teacher", type=str, + help='Key to use in the checkpoint (example: "teacher")') + parser.add_argument('--num_workers', default=10, type=int, help='Number of data loading workers per GPU.') + parser.add_argument("--dist_url", default="env://", type=str, help="""url used to set up + distributed training; see https://pytorch.org/docs/stable/distributed.html""") + parser.add_argument("--local_rank", default=0, type=int, help="Please ignore and do not set this argument.") + args = parser.parse_args() + + utils.init_distributed_mode(args) + print("git:\n {}\n".format(utils.get_sha())) + print("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items()))) + cudnn.benchmark = True + + # ============ building network ... 
============ + if "vit" in args.arch: + model = vits.__dict__[args.arch](patch_size=args.patch_size, num_classes=0) + print(f"Model {args.arch} {args.patch_size}x{args.patch_size} built.") + else: + print(f"Architecture {args.arch} non supported") + sys.exit(1) + if args.use_cuda: + model.cuda() + model.eval() + utils.load_pretrained_weights(model, args.pretrained_weights, args.checkpoint_key, args.arch, args.patch_size) + + dataset = CopydaysDataset(args.data_path) + + # ============ Extract features ... ============ + # extract features for queries + queries = [] + for q in dataset.query_blocks: + queries.append(extract_features(dataset.get_block(q), model, args)) + if utils.get_rank() == 0: + queries = torch.cat(queries) + print(f"Extraction of queries features done. Shape: {queries.shape}") + + # extract features for database + database = [] + for b in dataset.database_blocks: + database.append(extract_features(dataset.get_block(b), model, args)) + + # extract features for distractors + if os.path.isdir(args.distractors_path): + print("Using distractors...") + list_distractors = [os.path.join(args.distractors_path, s) for s in os.listdir(args.distractors_path) if is_image_file(s)] + database.append(extract_features(list_distractors, model, args)) + if utils.get_rank() == 0: + database = torch.cat(database) + print(f"Extraction of database and distractors features done. Shape: {database.shape}") + + # ============ Whitening ... ============ + if os.path.isdir(args.whitening_path): + print(f"Extracting features on images from {args.whitening_path} for learning the whitening operator.") + list_whit = [os.path.join(args.whitening_path, s) for s in os.listdir(args.whitening_path) if is_image_file(s)] + features_for_whitening = extract_features(list_whit, model, args) + if utils.get_rank() == 0: + # center + mean_feature = torch.mean(features_for_whitening, dim=0) + database -= mean_feature + queries -= mean_feature + pca = utils.PCA(dim=database.shape[-1], whit=0.5) + # compute covariance + cov = torch.mm(features_for_whitening.T, features_for_whitening) / features_for_whitening.shape[0] + pca.train_pca(cov.cpu().numpy()) + database = pca.apply(database) + queries = pca.apply(queries) + + # ============ Copy detection ... ============ + if utils.get_rank() == 0: + # l2 normalize the features + database = nn.functional.normalize(database, dim=1, p=2) + queries = nn.functional.normalize(queries, dim=1, p=2) + + # similarity + similarity = torch.mm(queries, database.T) + distances, indices = similarity.topk(20, largest=True, sorted=True) + + # evaluate + retrieved = dataset.eval_result(indices, distances) + dist.barrier() + diff --git a/PuzzleTuning/Counterpart PreTrain Methods/dino-main/eval_image_retrieval.py b/PuzzleTuning/Counterpart PreTrain Methods/dino-main/eval_image_retrieval.py new file mode 100644 index 0000000000000000000000000000000000000000..999f8c9009a9abcc28308c5995c286f65b1522ac --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/dino-main/eval_image_retrieval.py @@ -0,0 +1,201 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +import sys +import pickle +import argparse + +import torch +from torch import nn +import torch.distributed as dist +import torch.backends.cudnn as cudnn +from torchvision import models as torchvision_models +from torchvision import transforms as pth_transforms +from PIL import Image, ImageFile +import numpy as np + +import utils +import vision_transformer as vits +from eval_knn import extract_features + + +class OxfordParisDataset(torch.utils.data.Dataset): + def __init__(self, dir_main, dataset, split, transform=None, imsize=None): + if dataset not in ['roxford5k', 'rparis6k']: + raise ValueError('Unknown dataset: {}!'.format(dataset)) + + # loading imlist, qimlist, and gnd, in cfg as a dict + gnd_fname = os.path.join(dir_main, dataset, 'gnd_{}.pkl'.format(dataset)) + with open(gnd_fname, 'rb') as f: + cfg = pickle.load(f) + cfg['gnd_fname'] = gnd_fname + cfg['ext'] = '.jpg' + cfg['qext'] = '.jpg' + cfg['dir_data'] = os.path.join(dir_main, dataset) + cfg['dir_images'] = os.path.join(cfg['dir_data'], 'jpg') + cfg['n'] = len(cfg['imlist']) + cfg['nq'] = len(cfg['qimlist']) + cfg['im_fname'] = config_imname + cfg['qim_fname'] = config_qimname + cfg['dataset'] = dataset + self.cfg = cfg + + self.samples = cfg["qimlist"] if split == "query" else cfg["imlist"] + self.transform = transform + self.imsize = imsize + + def __len__(self): + return len(self.samples) + + def __getitem__(self, index): + path = os.path.join(self.cfg["dir_images"], self.samples[index] + ".jpg") + ImageFile.LOAD_TRUNCATED_IMAGES = True + with open(path, 'rb') as f: + img = Image.open(f) + img = img.convert('RGB') + if self.imsize is not None: + img.thumbnail((self.imsize, self.imsize), Image.ANTIALIAS) + if self.transform is not None: + img = self.transform(img) + return img, index + + +def config_imname(cfg, i): + return os.path.join(cfg['dir_images'], cfg['imlist'][i] + cfg['ext']) + + +def config_qimname(cfg, i): + return os.path.join(cfg['dir_images'], cfg['qimlist'][i] + cfg['qext']) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser('Image Retrieval on revisited Paris and Oxford') + parser.add_argument('--data_path', default='/path/to/revisited_paris_oxford/', type=str) + parser.add_argument('--dataset', default='roxford5k', type=str, choices=['roxford5k', 'rparis6k']) + parser.add_argument('--multiscale', default=False, type=utils.bool_flag) + parser.add_argument('--imsize', default=224, type=int, help='Image size') + parser.add_argument('--pretrained_weights', default='', type=str, help="Path to pretrained weights to evaluate.") + parser.add_argument('--use_cuda', default=True, type=utils.bool_flag) + parser.add_argument('--arch', default='vit_small', type=str, help='Architecture') + parser.add_argument('--patch_size', default=16, type=int, help='Patch resolution of the model.') + parser.add_argument("--checkpoint_key", default="teacher", type=str, + help='Key to use in the checkpoint (example: "teacher")') + parser.add_argument('--num_workers', default=10, type=int, help='Number of data loading workers per GPU.') + parser.add_argument("--dist_url", default="env://", type=str, 
help="""url used to set up + distributed training; see https://pytorch.org/docs/stable/distributed.html""") + parser.add_argument("--local_rank", default=0, type=int, help="Please ignore and do not set this argument.") + args = parser.parse_args() + + utils.init_distributed_mode(args) + print("git:\n {}\n".format(utils.get_sha())) + print("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items()))) + cudnn.benchmark = True + + # ============ preparing data ... ============ + transform = pth_transforms.Compose([ + pth_transforms.ToTensor(), + pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), + ]) + dataset_train = OxfordParisDataset(args.data_path, args.dataset, split="train", transform=transform, imsize=args.imsize) + dataset_query = OxfordParisDataset(args.data_path, args.dataset, split="query", transform=transform, imsize=args.imsize) + sampler = torch.utils.data.DistributedSampler(dataset_train, shuffle=False) + data_loader_train = torch.utils.data.DataLoader( + dataset_train, + sampler=sampler, + batch_size=1, + num_workers=args.num_workers, + pin_memory=True, + drop_last=False, + ) + data_loader_query = torch.utils.data.DataLoader( + dataset_query, + batch_size=1, + num_workers=args.num_workers, + pin_memory=True, + drop_last=False, + ) + print(f"train: {len(dataset_train)} imgs / query: {len(dataset_query)} imgs") + + # ============ building network ... ============ + if "vit" in args.arch: + model = vits.__dict__[args.arch](patch_size=args.patch_size, num_classes=0) + print(f"Model {args.arch} {args.patch_size}x{args.patch_size} built.") + elif "xcit" in args.arch: + model = torch.hub.load('facebookresearch/xcit:main', args.arch, num_classes=0) + elif args.arch in torchvision_models.__dict__.keys(): + model = torchvision_models.__dict__[args.arch](num_classes=0) + else: + print(f"Architecture {args.arch} non supported") + sys.exit(1) + if args.use_cuda: + model.cuda() + model.eval() + + # load pretrained weights + if os.path.isfile(args.pretrained_weights): + state_dict = torch.load(args.pretrained_weights, map_location="cpu") + if args.checkpoint_key is not None and args.checkpoint_key in state_dict: + print(f"Take key {args.checkpoint_key} in provided checkpoint dict") + state_dict = state_dict[args.checkpoint_key] + # remove `module.` prefix + state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()} + # remove `backbone.` prefix induced by multicrop wrapper + state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()} + msg = model.load_state_dict(state_dict, strict=False) + print('Pretrained weights found at {} and loaded with msg: {}'.format(args.pretrained_weights, msg)) + elif args.arch == "vit_small" and args.patch_size == 16: + print("Since no pretrained weights have been provided, we load pretrained DINO weights on Google Landmark v2.") + model.load_state_dict(torch.hub.load_state_dict_from_url(url="https://dl.fbaipublicfiles.com/dino/dino_vitsmall16_googlelandmark_pretrain/dino_vitsmall16_googlelandmark_pretrain.pth")) + else: + print("Warning: We use random weights.") + + ############################################################################ + # Step 1: extract features + train_features = extract_features(model, data_loader_train, args.use_cuda, multiscale=args.multiscale) + query_features = extract_features(model, data_loader_query, args.use_cuda, multiscale=args.multiscale) + + if utils.get_rank() == 0: # only rank 0 will work from now on + # normalize features + train_features = 
nn.functional.normalize(train_features, dim=1, p=2) + query_features = nn.functional.normalize(query_features, dim=1, p=2) + + ############################################################################ + # Step 2: similarity + sim = torch.mm(train_features, query_features.T) + ranks = torch.argsort(-sim, dim=0).cpu().numpy() + + ############################################################################ + # Step 3: evaluate + gnd = dataset_train.cfg['gnd'] + # evaluate ranks + ks = [1, 5, 10] + # search for easy & hard + gnd_t = [] + for i in range(len(gnd)): + g = {} + g['ok'] = np.concatenate([gnd[i]['easy'], gnd[i]['hard']]) + g['junk'] = np.concatenate([gnd[i]['junk']]) + gnd_t.append(g) + mapM, apsM, mprM, prsM = utils.compute_map(ranks, gnd_t, ks) + # search for hard + gnd_t = [] + for i in range(len(gnd)): + g = {} + g['ok'] = np.concatenate([gnd[i]['hard']]) + g['junk'] = np.concatenate([gnd[i]['junk'], gnd[i]['easy']]) + gnd_t.append(g) + mapH, apsH, mprH, prsH = utils.compute_map(ranks, gnd_t, ks) + print('>> {}: mAP M: {}, H: {}'.format(args.dataset, np.around(mapM*100, decimals=2), np.around(mapH*100, decimals=2))) + print('>> {}: mP@k{} M: {}, H: {}'.format(args.dataset, np.array(ks), np.around(mprM*100, decimals=2), np.around(mprH*100, decimals=2))) + dist.barrier() diff --git a/PuzzleTuning/Counterpart PreTrain Methods/dino-main/eval_knn.py b/PuzzleTuning/Counterpart PreTrain Methods/dino-main/eval_knn.py new file mode 100644 index 0000000000000000000000000000000000000000..fe99a26049cda2d764086727223e6cc9a8f2bfb8 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/dino-main/eval_knn.py @@ -0,0 +1,242 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +import sys +import argparse + +import torch +from torch import nn +import torch.distributed as dist +import torch.backends.cudnn as cudnn +from torchvision import datasets +from torchvision import transforms as pth_transforms +from torchvision import models as torchvision_models + +import utils +import vision_transformer as vits + + +def extract_feature_pipeline(args): + # ============ preparing data ... 
============ + transform = pth_transforms.Compose([ + pth_transforms.Resize(256, interpolation=3), + pth_transforms.CenterCrop(224), + pth_transforms.ToTensor(), + pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), + ]) + dataset_train = ReturnIndexDataset(os.path.join(args.data_path, "train"), transform=transform) + dataset_val = ReturnIndexDataset(os.path.join(args.data_path, "val"), transform=transform) + sampler = torch.utils.data.DistributedSampler(dataset_train, shuffle=False) + data_loader_train = torch.utils.data.DataLoader( + dataset_train, + sampler=sampler, + batch_size=args.batch_size_per_gpu, + num_workers=args.num_workers, + pin_memory=True, + drop_last=False, + ) + data_loader_val = torch.utils.data.DataLoader( + dataset_val, + batch_size=args.batch_size_per_gpu, + num_workers=args.num_workers, + pin_memory=True, + drop_last=False, + ) + print(f"Data loaded with {len(dataset_train)} train and {len(dataset_val)} val imgs.") + + # ============ building network ... ============ + if "vit" in args.arch: + model = vits.__dict__[args.arch](patch_size=args.patch_size, num_classes=0) + print(f"Model {args.arch} {args.patch_size}x{args.patch_size} built.") + elif "xcit" in args.arch: + model = torch.hub.load('facebookresearch/xcit:main', args.arch, num_classes=0) + elif args.arch in torchvision_models.__dict__.keys(): + model = torchvision_models.__dict__[args.arch](num_classes=0) + model.fc = nn.Identity() + else: + print(f"Architecture {args.arch} non supported") + sys.exit(1) + model.cuda() + utils.load_pretrained_weights(model, args.pretrained_weights, args.checkpoint_key, args.arch, args.patch_size) + model.eval() + + # ============ extract features ... ============ + print("Extracting features for train set...") + train_features = extract_features(model, data_loader_train, args.use_cuda) + print("Extracting features for val set...") + test_features = extract_features(model, data_loader_val, args.use_cuda) + + if utils.get_rank() == 0: + train_features = nn.functional.normalize(train_features, dim=1, p=2) + test_features = nn.functional.normalize(test_features, dim=1, p=2) + + train_labels = torch.tensor([s[-1] for s in dataset_train.samples]).long() + test_labels = torch.tensor([s[-1] for s in dataset_val.samples]).long() + # save features and labels + if args.dump_features and dist.get_rank() == 0: + torch.save(train_features.cpu(), os.path.join(args.dump_features, "trainfeat.pth")) + torch.save(test_features.cpu(), os.path.join(args.dump_features, "testfeat.pth")) + torch.save(train_labels.cpu(), os.path.join(args.dump_features, "trainlabels.pth")) + torch.save(test_labels.cpu(), os.path.join(args.dump_features, "testlabels.pth")) + return train_features, test_features, train_labels, test_labels + + +@torch.no_grad() +def extract_features(model, data_loader, use_cuda=True, multiscale=False): + metric_logger = utils.MetricLogger(delimiter=" ") + features = None + for samples, index in metric_logger.log_every(data_loader, 10): + samples = samples.cuda(non_blocking=True) + index = index.cuda(non_blocking=True) + if multiscale: + feats = utils.multi_scale(samples, model) + else: + feats = model(samples).clone() + + # init storage feature matrix + if dist.get_rank() == 0 and features is None: + features = torch.zeros(len(data_loader.dataset), feats.shape[-1]) + if use_cuda: + features = features.cuda(non_blocking=True) + print(f"Storing features into tensor of shape {features.shape}") + + # get indexes from all processes + y_all = 
torch.empty(dist.get_world_size(), index.size(0), dtype=index.dtype, device=index.device) + y_l = list(y_all.unbind(0)) + y_all_reduce = torch.distributed.all_gather(y_l, index, async_op=True) + y_all_reduce.wait() + index_all = torch.cat(y_l) + + # share features between processes + feats_all = torch.empty( + dist.get_world_size(), + feats.size(0), + feats.size(1), + dtype=feats.dtype, + device=feats.device, + ) + output_l = list(feats_all.unbind(0)) + output_all_reduce = torch.distributed.all_gather(output_l, feats, async_op=True) + output_all_reduce.wait() + + # update storage feature matrix + if dist.get_rank() == 0: + if use_cuda: + features.index_copy_(0, index_all, torch.cat(output_l)) + else: + features.index_copy_(0, index_all.cpu(), torch.cat(output_l).cpu()) + return features + + +@torch.no_grad() +def knn_classifier(train_features, train_labels, test_features, test_labels, k, T, num_classes=1000): + top1, top5, total = 0.0, 0.0, 0 + train_features = train_features.t() + num_test_images, num_chunks = test_labels.shape[0], 100 + imgs_per_chunk = num_test_images // num_chunks + retrieval_one_hot = torch.zeros(k, num_classes).to(train_features.device) + for idx in range(0, num_test_images, imgs_per_chunk): + # get the features for test images + features = test_features[ + idx : min((idx + imgs_per_chunk), num_test_images), : + ] + targets = test_labels[idx : min((idx + imgs_per_chunk), num_test_images)] + batch_size = targets.shape[0] + + # calculate the dot product and compute top-k neighbors + similarity = torch.mm(features, train_features) + distances, indices = similarity.topk(k, largest=True, sorted=True) + candidates = train_labels.view(1, -1).expand(batch_size, -1) + retrieved_neighbors = torch.gather(candidates, 1, indices) + + retrieval_one_hot.resize_(batch_size * k, num_classes).zero_() + retrieval_one_hot.scatter_(1, retrieved_neighbors.view(-1, 1), 1) + distances_transform = distances.clone().div_(T).exp_() + probs = torch.sum( + torch.mul( + retrieval_one_hot.view(batch_size, -1, num_classes), + distances_transform.view(batch_size, -1, 1), + ), + 1, + ) + _, predictions = probs.sort(1, True) + + # find the predictions that match the target + correct = predictions.eq(targets.data.view(-1, 1)) + top1 = top1 + correct.narrow(1, 0, 1).sum().item() + top5 = top5 + correct.narrow(1, 0, min(5, k)).sum().item() # top5 does not make sense if k < 5 + total += targets.size(0) + top1 = top1 * 100.0 / total + top5 = top5 * 100.0 / total + return top1, top5 + + +class ReturnIndexDataset(datasets.ImageFolder): + def __getitem__(self, idx): + img, lab = super(ReturnIndexDataset, self).__getitem__(idx) + return img, idx + + +if __name__ == '__main__': + parser = argparse.ArgumentParser('Evaluation with weighted k-NN on ImageNet') + parser.add_argument('--batch_size_per_gpu', default=128, type=int, help='Per-GPU batch-size') + parser.add_argument('--nb_knn', default=[10, 20, 100, 200], nargs='+', type=int, + help='Number of NN to use. 20 is usually working the best.') + parser.add_argument('--temperature', default=0.07, type=float, + help='Temperature used in the voting coefficient') + parser.add_argument('--pretrained_weights', default='', type=str, help="Path to pretrained weights to evaluate.") + parser.add_argument('--use_cuda', default=True, type=utils.bool_flag, + help="Should we store the features on GPU? 
We recommend setting this to False if you encounter OOM") + parser.add_argument('--arch', default='vit_small', type=str, help='Architecture') + parser.add_argument('--patch_size', default=16, type=int, help='Patch resolution of the model.') + parser.add_argument("--checkpoint_key", default="teacher", type=str, + help='Key to use in the checkpoint (example: "teacher")') + parser.add_argument('--dump_features', default=None, + help='Path where to save computed features, empty for no saving') + parser.add_argument('--load_features', default=None, help="""If the features have + already been computed, where to find them.""") + parser.add_argument('--num_workers', default=10, type=int, help='Number of data loading workers per GPU.') + parser.add_argument("--dist_url", default="env://", type=str, help="""url used to set up + distributed training; see https://pytorch.org/docs/stable/distributed.html""") + parser.add_argument("--local_rank", default=0, type=int, help="Please ignore and do not set this argument.") + parser.add_argument('--data_path', default='/path/to/imagenet/', type=str) + args = parser.parse_args() + + utils.init_distributed_mode(args) + print("git:\n {}\n".format(utils.get_sha())) + print("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items()))) + cudnn.benchmark = True + + if args.load_features: + train_features = torch.load(os.path.join(args.load_features, "trainfeat.pth")) + test_features = torch.load(os.path.join(args.load_features, "testfeat.pth")) + train_labels = torch.load(os.path.join(args.load_features, "trainlabels.pth")) + test_labels = torch.load(os.path.join(args.load_features, "testlabels.pth")) + else: + # need to extract features ! + train_features, test_features, train_labels, test_labels = extract_feature_pipeline(args) + + if utils.get_rank() == 0: + if args.use_cuda: + train_features = train_features.cuda() + test_features = test_features.cuda() + train_labels = train_labels.cuda() + test_labels = test_labels.cuda() + + print("Features are ready!\nStart the k-NN classification.") + for k in args.nb_knn: + top1, top5 = knn_classifier(train_features, train_labels, + test_features, test_labels, k, args.temperature) + print(f"{k}-NN classifier result: Top1: {top1}, Top5: {top5}") + dist.barrier() diff --git a/PuzzleTuning/Counterpart PreTrain Methods/dino-main/eval_linear.py b/PuzzleTuning/Counterpart PreTrain Methods/dino-main/eval_linear.py new file mode 100644 index 0000000000000000000000000000000000000000..cdef16b473d216889b493aa0c7a63e15f945092c --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/dino-main/eval_linear.py @@ -0,0 +1,281 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
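+# ---------------------------------------------------------------------------
+# Editor's note -- an illustrative sketch, not part of the upstream DINO file:
+# the SGD optimizer below applies the linear learning-rate scaling rule,
+#   effective_lr = args.lr * (batch_size_per_gpu * world_size) / 256.
+# With the defaults (lr=0.001, batch_size_per_gpu=128), training on 2 GPUs
+# gives 0.001 * 256 / 256 = 0.001, while 8 GPUs give 0.001 * 1024 / 256 = 0.004.
+# ---------------------------------------------------------------------------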
+import os
+import sys
+import argparse
+import json
+from pathlib import Path
+
+import torch
+from torch import nn
+import torch.distributed as dist
+import torch.backends.cudnn as cudnn
+from torchvision import datasets
+from torchvision import transforms as pth_transforms
+from torchvision import models as torchvision_models
+
+import utils
+import vision_transformer as vits
+
+
+def eval_linear(args):
+    utils.init_distributed_mode(args)
+    print("git:\n  {}\n".format(utils.get_sha()))
+    print("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())))
+    cudnn.benchmark = True
+
+    # ============ building network ... ============
+    # if the network is a Vision Transformer (i.e. vit_tiny, vit_small, vit_base)
+    if args.arch in vits.__dict__.keys():
+        model = vits.__dict__[args.arch](patch_size=args.patch_size, num_classes=0)
+        embed_dim = model.embed_dim * (args.n_last_blocks + int(args.avgpool_patchtokens))
+    # if the network is an XCiT
+    elif "xcit" in args.arch:
+        model = torch.hub.load('facebookresearch/xcit:main', args.arch, num_classes=0)
+        embed_dim = model.embed_dim
+    # otherwise, we check if the architecture is in torchvision models
+    elif args.arch in torchvision_models.__dict__.keys():
+        model = torchvision_models.__dict__[args.arch]()
+        embed_dim = model.fc.weight.shape[1]
+        model.fc = nn.Identity()
+    else:
+        print(f"Unknown architecture: {args.arch}")
+        sys.exit(1)
+    model.cuda()
+    model.eval()
+    # load weights to evaluate
+    utils.load_pretrained_weights(model, args.pretrained_weights, args.checkpoint_key, args.arch, args.patch_size)
+    print(f"Model {args.arch} built.")
+
+    linear_classifier = LinearClassifier(embed_dim, num_labels=args.num_labels)
+    linear_classifier = linear_classifier.cuda()
+    linear_classifier = nn.parallel.DistributedDataParallel(linear_classifier, device_ids=[args.gpu])
+
+    # ============ preparing data ...
============ + val_transform = pth_transforms.Compose([ + pth_transforms.Resize(256, interpolation=3), + pth_transforms.CenterCrop(224), + pth_transforms.ToTensor(), + pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), + ]) + dataset_val = datasets.ImageFolder(os.path.join(args.data_path, "val"), transform=val_transform) + val_loader = torch.utils.data.DataLoader( + dataset_val, + batch_size=args.batch_size_per_gpu, + num_workers=args.num_workers, + pin_memory=True, + ) + + if args.evaluate: + utils.load_pretrained_linear_weights(linear_classifier, args.arch, args.patch_size) + test_stats = validate_network(val_loader, model, linear_classifier, args.n_last_blocks, args.avgpool_patchtokens) + print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%") + return + + train_transform = pth_transforms.Compose([ + pth_transforms.RandomResizedCrop(224), + pth_transforms.RandomHorizontalFlip(), + pth_transforms.ToTensor(), + pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), + ]) + dataset_train = datasets.ImageFolder(os.path.join(args.data_path, "train"), transform=train_transform) + sampler = torch.utils.data.distributed.DistributedSampler(dataset_train) + train_loader = torch.utils.data.DataLoader( + dataset_train, + sampler=sampler, + batch_size=args.batch_size_per_gpu, + num_workers=args.num_workers, + pin_memory=True, + ) + print(f"Data loaded with {len(dataset_train)} train and {len(dataset_val)} val imgs.") + + # set optimizer + optimizer = torch.optim.SGD( + linear_classifier.parameters(), + args.lr * (args.batch_size_per_gpu * utils.get_world_size()) / 256., # linear scaling rule + momentum=0.9, + weight_decay=0, # we do not apply weight decay + ) + scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs, eta_min=0) + + # Optionally resume from a checkpoint + to_restore = {"epoch": 0, "best_acc": 0.} + utils.restart_from_checkpoint( + os.path.join(args.output_dir, "checkpoint.pth.tar"), + run_variables=to_restore, + state_dict=linear_classifier, + optimizer=optimizer, + scheduler=scheduler, + ) + start_epoch = to_restore["epoch"] + best_acc = to_restore["best_acc"] + + for epoch in range(start_epoch, args.epochs): + train_loader.sampler.set_epoch(epoch) + + train_stats = train(model, linear_classifier, optimizer, train_loader, epoch, args.n_last_blocks, args.avgpool_patchtokens) + scheduler.step() + + log_stats = {**{f'train_{k}': v for k, v in train_stats.items()}, + 'epoch': epoch} + if epoch % args.val_freq == 0 or epoch == args.epochs - 1: + test_stats = validate_network(val_loader, model, linear_classifier, args.n_last_blocks, args.avgpool_patchtokens) + print(f"Accuracy at epoch {epoch} of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%") + best_acc = max(best_acc, test_stats["acc1"]) + print(f'Max accuracy so far: {best_acc:.2f}%') + log_stats = {**{k: v for k, v in log_stats.items()}, + **{f'test_{k}': v for k, v in test_stats.items()}} + if utils.is_main_process(): + with (Path(args.output_dir) / "log.txt").open("a") as f: + f.write(json.dumps(log_stats) + "\n") + save_dict = { + "epoch": epoch + 1, + "state_dict": linear_classifier.state_dict(), + "optimizer": optimizer.state_dict(), + "scheduler": scheduler.state_dict(), + "best_acc": best_acc, + } + torch.save(save_dict, os.path.join(args.output_dir, "checkpoint.pth.tar")) + print("Training of the supervised linear classifier on frozen features completed.\n" + "Top-1 test accuracy: 
{acc:.1f}".format(acc=best_acc)) + + +def train(model, linear_classifier, optimizer, loader, epoch, n, avgpool): + linear_classifier.train() + metric_logger = utils.MetricLogger(delimiter=" ") + metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) + header = 'Epoch: [{}]'.format(epoch) + for (inp, target) in metric_logger.log_every(loader, 20, header): + # move to gpu + inp = inp.cuda(non_blocking=True) + target = target.cuda(non_blocking=True) + + # forward + with torch.no_grad(): + if "vit" in args.arch: + intermediate_output = model.get_intermediate_layers(inp, n) + output = torch.cat([x[:, 0] for x in intermediate_output], dim=-1) + if avgpool: + output = torch.cat((output.unsqueeze(-1), torch.mean(intermediate_output[-1][:, 1:], dim=1).unsqueeze(-1)), dim=-1) + output = output.reshape(output.shape[0], -1) + else: + output = model(inp) + output = linear_classifier(output) + + # compute cross entropy loss + loss = nn.CrossEntropyLoss()(output, target) + + # compute the gradients + optimizer.zero_grad() + loss.backward() + + # step + optimizer.step() + + # log + torch.cuda.synchronize() + metric_logger.update(loss=loss.item()) + metric_logger.update(lr=optimizer.param_groups[0]["lr"]) + # gather the stats from all processes + metric_logger.synchronize_between_processes() + print("Averaged stats:", metric_logger) + return {k: meter.global_avg for k, meter in metric_logger.meters.items()} + + +@torch.no_grad() +def validate_network(val_loader, model, linear_classifier, n, avgpool): + linear_classifier.eval() + metric_logger = utils.MetricLogger(delimiter=" ") + header = 'Test:' + for inp, target in metric_logger.log_every(val_loader, 20, header): + # move to gpu + inp = inp.cuda(non_blocking=True) + target = target.cuda(non_blocking=True) + + # forward + with torch.no_grad(): + if "vit" in args.arch: + intermediate_output = model.get_intermediate_layers(inp, n) + output = torch.cat([x[:, 0] for x in intermediate_output], dim=-1) + if avgpool: + output = torch.cat((output.unsqueeze(-1), torch.mean(intermediate_output[-1][:, 1:], dim=1).unsqueeze(-1)), dim=-1) + output = output.reshape(output.shape[0], -1) + else: + output = model(inp) + output = linear_classifier(output) + loss = nn.CrossEntropyLoss()(output, target) + + if linear_classifier.module.num_labels >= 5: + acc1, acc5 = utils.accuracy(output, target, topk=(1, 5)) + else: + acc1, = utils.accuracy(output, target, topk=(1,)) + + batch_size = inp.shape[0] + metric_logger.update(loss=loss.item()) + metric_logger.meters['acc1'].update(acc1.item(), n=batch_size) + if linear_classifier.module.num_labels >= 5: + metric_logger.meters['acc5'].update(acc5.item(), n=batch_size) + if linear_classifier.module.num_labels >= 5: + print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}' + .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss)) + else: + print('* Acc@1 {top1.global_avg:.3f} loss {losses.global_avg:.3f}' + .format(top1=metric_logger.acc1, losses=metric_logger.loss)) + return {k: meter.global_avg for k, meter in metric_logger.meters.items()} + + +class LinearClassifier(nn.Module): + """Linear layer to train on top of frozen features""" + def __init__(self, dim, num_labels=1000): + super(LinearClassifier, self).__init__() + self.num_labels = num_labels + self.linear = nn.Linear(dim, num_labels) + self.linear.weight.data.normal_(mean=0.0, std=0.01) + self.linear.bias.data.zero_() + + def forward(self, x): + # flatten + x = 
x.view(x.size(0), -1)
+
+        # linear layer
+        return self.linear(x)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser('Evaluation with linear classification on ImageNet')
+    parser.add_argument('--n_last_blocks', default=4, type=int, help="""Concatenate [CLS] tokens
+        for the `n` last blocks. We use `n=4` when evaluating ViT-Small and `n=1` with ViT-Base.""")
+    parser.add_argument('--avgpool_patchtokens', default=False, type=utils.bool_flag,
+        help="""Whether or not to concatenate the global average pooled features to the [CLS] token.
+        We typically set this to False for ViT-Small and to True with ViT-Base.""")
+    parser.add_argument('--arch', default='vit_small', type=str, help='Architecture')
+    parser.add_argument('--patch_size', default=16, type=int, help='Patch resolution of the model.')
+    parser.add_argument('--pretrained_weights', default='', type=str, help="Path to pretrained weights to evaluate.")
+    parser.add_argument("--checkpoint_key", default="teacher", type=str, help='Key to use in the checkpoint (example: "teacher")')
+    parser.add_argument('--epochs', default=100, type=int, help='Number of epochs of training.')
+    parser.add_argument("--lr", default=0.001, type=float, help="""Learning rate at the beginning of
+        training (highest LR used during training). The learning rate is linearly scaled
+        with the batch size, and specified here for a reference batch size of 256.
+        We recommend tweaking the LR depending on the checkpoint evaluated.""")
+    parser.add_argument('--batch_size_per_gpu', default=128, type=int, help='Per-GPU batch-size')
+    parser.add_argument("--dist_url", default="env://", type=str, help="""url used to set up
+        distributed training; see https://pytorch.org/docs/stable/distributed.html""")
+    parser.add_argument("--local_rank", default=0, type=int, help="Please ignore and do not set this argument.")
+    parser.add_argument('--data_path', default='/path/to/imagenet/', type=str)
+    parser.add_argument('--num_workers', default=10, type=int, help='Number of data loading workers per GPU.')
+    parser.add_argument('--val_freq', default=1, type=int, help="Epoch frequency for validation.")
+    parser.add_argument('--output_dir', default=".", help='Path to save logs and checkpoints')
+    parser.add_argument('--num_labels', default=1000, type=int, help='Number of labels for linear classifier')
+    parser.add_argument('--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set')
+    args = parser.parse_args()
+    eval_linear(args)
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/dino-main/eval_video_segmentation.py b/PuzzleTuning/Counterpart PreTrain Methods/dino-main/eval_video_segmentation.py
new file mode 100644
index 0000000000000000000000000000000000000000..08a18c475db9cbadb29d2e0f22113c0cc9efed49
--- /dev/null
+++ b/PuzzleTuning/Counterpart PreTrain Methods/dino-main/eval_video_segmentation.py
@@ -0,0 +1,292 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
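+# ---------------------------------------------------------------------------
+# Editor's note -- an illustrative sketch, not part of the upstream DINO file:
+# label_propagation() below performs a soft nearest-neighbour vote in feature
+# space. Schematically, with L2-normalized target features f_tar and context
+# features f_src, and one-hot context masks segs:
+#   aff = exp(f_tar @ f_src.T / 0.1)   # temperature-scaled affinities
+#   aff *= neighborhood_mask           # keep spatially nearby source nodes only
+#   aff = topk_per_query(aff, k)       # zero all but the k strongest sources
+#   aff /= aff.sum(dim=0)              # normalize over source nodes
+#   seg_tar = segs @ aff               # propagate masks to the target frame
+# ---------------------------------------------------------------------------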
+""" +Some parts are taken from https://github.com/Liusifei/UVC +""" +import os +import copy +import glob +import queue +from urllib.request import urlopen +import argparse +import numpy as np +from tqdm import tqdm + +import cv2 +import torch +import torch.nn as nn +from torch.nn import functional as F +from PIL import Image +from torchvision import transforms + +import utils +import vision_transformer as vits + + +@torch.no_grad() +def eval_video_tracking_davis(args, model, frame_list, video_dir, first_seg, seg_ori, color_palette): + """ + Evaluate tracking on a video given first frame & segmentation + """ + video_folder = os.path.join(args.output_dir, video_dir.split('/')[-1]) + os.makedirs(video_folder, exist_ok=True) + + # The queue stores the n preceeding frames + que = queue.Queue(args.n_last_frames) + + # first frame + frame1, ori_h, ori_w = read_frame(frame_list[0]) + # extract first frame feature + frame1_feat = extract_feature(model, frame1).T # dim x h*w + + # saving first segmentation + out_path = os.path.join(video_folder, "00000.png") + imwrite_indexed(out_path, seg_ori, color_palette) + mask_neighborhood = None + for cnt in tqdm(range(1, len(frame_list))): + frame_tar = read_frame(frame_list[cnt])[0] + + # we use the first segmentation and the n previous ones + used_frame_feats = [frame1_feat] + [pair[0] for pair in list(que.queue)] + used_segs = [first_seg] + [pair[1] for pair in list(que.queue)] + + frame_tar_avg, feat_tar, mask_neighborhood = label_propagation(args, model, frame_tar, used_frame_feats, used_segs, mask_neighborhood) + + # pop out oldest frame if neccessary + if que.qsize() == args.n_last_frames: + que.get() + # push current results into queue + seg = copy.deepcopy(frame_tar_avg) + que.put([feat_tar, seg]) + + # upsampling & argmax + frame_tar_avg = F.interpolate(frame_tar_avg, scale_factor=args.patch_size, mode='bilinear', align_corners=False, recompute_scale_factor=False)[0] + frame_tar_avg = norm_mask(frame_tar_avg) + _, frame_tar_seg = torch.max(frame_tar_avg, dim=0) + + # saving to disk + frame_tar_seg = np.array(frame_tar_seg.squeeze().cpu(), dtype=np.uint8) + frame_tar_seg = np.array(Image.fromarray(frame_tar_seg).resize((ori_w, ori_h), 0)) + frame_nm = frame_list[cnt].split('/')[-1].replace(".jpg", ".png") + imwrite_indexed(os.path.join(video_folder, frame_nm), frame_tar_seg, color_palette) + + +def restrict_neighborhood(h, w): + # We restrict the set of source nodes considered to a spatial neighborhood of the query node (i.e. 
``local attention'') + mask = torch.zeros(h, w, h, w) + for i in range(h): + for j in range(w): + for p in range(2 * args.size_mask_neighborhood + 1): + for q in range(2 * args.size_mask_neighborhood + 1): + if i - args.size_mask_neighborhood + p < 0 or i - args.size_mask_neighborhood + p >= h: + continue + if j - args.size_mask_neighborhood + q < 0 or j - args.size_mask_neighborhood + q >= w: + continue + mask[i, j, i - args.size_mask_neighborhood + p, j - args.size_mask_neighborhood + q] = 1 + + mask = mask.reshape(h * w, h * w) + return mask.cuda(non_blocking=True) + + +def norm_mask(mask): + c, h, w = mask.size() + for cnt in range(c): + mask_cnt = mask[cnt,:,:] + if(mask_cnt.max() > 0): + mask_cnt = (mask_cnt - mask_cnt.min()) + mask_cnt = mask_cnt/mask_cnt.max() + mask[cnt,:,:] = mask_cnt + return mask + + +def label_propagation(args, model, frame_tar, list_frame_feats, list_segs, mask_neighborhood=None): + """ + propagate segs of frames in list_frames to frame_tar + """ + ## we only need to extract feature of the target frame + feat_tar, h, w = extract_feature(model, frame_tar, return_h_w=True) + + return_feat_tar = feat_tar.T # dim x h*w + + ncontext = len(list_frame_feats) + feat_sources = torch.stack(list_frame_feats) # nmb_context x dim x h*w + + feat_tar = F.normalize(feat_tar, dim=1, p=2) + feat_sources = F.normalize(feat_sources, dim=1, p=2) + + feat_tar = feat_tar.unsqueeze(0).repeat(ncontext, 1, 1) + aff = torch.exp(torch.bmm(feat_tar, feat_sources) / 0.1) # nmb_context x h*w (tar: query) x h*w (source: keys) + + if args.size_mask_neighborhood > 0: + if mask_neighborhood is None: + mask_neighborhood = restrict_neighborhood(h, w) + mask_neighborhood = mask_neighborhood.unsqueeze(0).repeat(ncontext, 1, 1) + aff *= mask_neighborhood + + aff = aff.transpose(2, 1).reshape(-1, h * w) # nmb_context*h*w (source: keys) x h*w (tar: queries) + tk_val, _ = torch.topk(aff, dim=0, k=args.topk) + tk_val_min, _ = torch.min(tk_val, dim=0) + aff[aff < tk_val_min] = 0 + + aff = aff / torch.sum(aff, keepdim=True, axis=0) + + list_segs = [s.cuda() for s in list_segs] + segs = torch.cat(list_segs) + nmb_context, C, h, w = segs.shape + segs = segs.reshape(nmb_context, C, -1).transpose(2, 1).reshape(-1, C).T # C x nmb_context*h*w + seg_tar = torch.mm(segs, aff) + seg_tar = seg_tar.reshape(1, C, h, w) + return seg_tar, return_feat_tar, mask_neighborhood + + +def extract_feature(model, frame, return_h_w=False): + """Extract one frame feature everytime.""" + out = model.get_intermediate_layers(frame.unsqueeze(0).cuda(), n=1)[0] + out = out[:, 1:, :] # we discard the [CLS] token + h, w = int(frame.shape[1] / model.patch_embed.patch_size), int(frame.shape[2] / model.patch_embed.patch_size) + dim = out.shape[-1] + out = out[0].reshape(h, w, dim) + out = out.reshape(-1, dim) + if return_h_w: + return out, h, w + return out + + +def imwrite_indexed(filename, array, color_palette): + """ Save indexed png for DAVIS.""" + if np.atleast_3d(array).shape[2] != 1: + raise Exception("Saving indexed PNGs requires 2D array.") + + im = Image.fromarray(array) + im.putpalette(color_palette.ravel()) + im.save(filename, format='PNG') + + +def to_one_hot(y_tensor, n_dims=None): + """ + Take integer y (tensor or variable) with n dims & + convert it to 1-hot representation with n+1 dims. 
+ """ + if(n_dims is None): + n_dims = int(y_tensor.max()+ 1) + _,h,w = y_tensor.size() + y_tensor = y_tensor.type(torch.LongTensor).view(-1, 1) + n_dims = n_dims if n_dims is not None else int(torch.max(y_tensor)) + 1 + y_one_hot = torch.zeros(y_tensor.size()[0], n_dims).scatter_(1, y_tensor, 1) + y_one_hot = y_one_hot.view(h,w,n_dims) + return y_one_hot.permute(2, 0, 1).unsqueeze(0) + + +def read_frame_list(video_dir): + frame_list = [img for img in glob.glob(os.path.join(video_dir,"*.jpg"))] + frame_list = sorted(frame_list) + return frame_list + + +def read_frame(frame_dir, scale_size=[480]): + """ + read a single frame & preprocess + """ + img = cv2.imread(frame_dir) + ori_h, ori_w, _ = img.shape + if len(scale_size) == 1: + if(ori_h > ori_w): + tw = scale_size[0] + th = (tw * ori_h) / ori_w + th = int((th // 64) * 64) + else: + th = scale_size[0] + tw = (th * ori_w) / ori_h + tw = int((tw // 64) * 64) + else: + th, tw = scale_size + img = cv2.resize(img, (tw, th)) + img = img.astype(np.float32) + img = img / 255.0 + img = img[:, :, ::-1] + img = np.transpose(img.copy(), (2, 0, 1)) + img = torch.from_numpy(img).float() + img = color_normalize(img) + return img, ori_h, ori_w + + +def read_seg(seg_dir, factor, scale_size=[480]): + seg = Image.open(seg_dir) + _w, _h = seg.size # note PIL.Image.Image's size is (w, h) + if len(scale_size) == 1: + if(_w > _h): + _th = scale_size[0] + _tw = (_th * _w) / _h + _tw = int((_tw // 64) * 64) + else: + _tw = scale_size[0] + _th = (_tw * _h) / _w + _th = int((_th // 64) * 64) + else: + _th = scale_size[1] + _tw = scale_size[0] + small_seg = np.array(seg.resize((_tw // factor, _th // factor), 0)) + small_seg = torch.from_numpy(small_seg.copy()).contiguous().float().unsqueeze(0) + return to_one_hot(small_seg), np.asarray(seg) + + +def color_normalize(x, mean=[0.485, 0.456, 0.406], std=[0.228, 0.224, 0.225]): + for t, m, s in zip(x, mean, std): + t.sub_(m) + t.div_(s) + return x + + +if __name__ == '__main__': + parser = argparse.ArgumentParser('Evaluation with video object segmentation on DAVIS 2017') + parser.add_argument('--pretrained_weights', default='', type=str, help="Path to pretrained weights to evaluate.") + parser.add_argument('--arch', default='vit_small', type=str, + choices=['vit_tiny', 'vit_small', 'vit_base'], help='Architecture (support only ViT atm).') + parser.add_argument('--patch_size', default=16, type=int, help='Patch resolution of the model.') + parser.add_argument("--checkpoint_key", default="teacher", type=str, help='Key to use in the checkpoint (example: "teacher")') + parser.add_argument('--output_dir', default=".", help='Path where to save segmentations') + parser.add_argument('--data_path', default='/path/to/davis/', type=str) + parser.add_argument("--n_last_frames", type=int, default=7, help="number of preceeding frames") + parser.add_argument("--size_mask_neighborhood", default=12, type=int, + help="We restrict the set of source nodes considered to a spatial neighborhood of the query node") + parser.add_argument("--topk", type=int, default=5, help="accumulate label from top k neighbors") + parser.add_argument("--bs", type=int, default=6, help="Batch size, try to reduce if OOM") + args = parser.parse_args() + + print("git:\n {}\n".format(utils.get_sha())) + print("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items()))) + + # building network + model = vits.__dict__[args.arch](patch_size=args.patch_size, num_classes=0) + print(f"Model {args.arch} {args.patch_size}x{args.patch_size} built.") + 
model.cuda() + utils.load_pretrained_weights(model, args.pretrained_weights, args.checkpoint_key, args.arch, args.patch_size) + for param in model.parameters(): + param.requires_grad = False + model.eval() + + color_palette = [] + for line in urlopen("https://raw.githubusercontent.com/Liusifei/UVC/master/libs/data/palette.txt"): + color_palette.append([int(i) for i in line.decode("utf-8").split('\n')[0].split(" ")]) + color_palette = np.asarray(color_palette, dtype=np.uint8).reshape(-1,3) + + video_list = open(os.path.join(args.data_path, "ImageSets/2017/val.txt")).readlines() + for i, video_name in enumerate(video_list): + video_name = video_name.strip() + print(f'[{i}/{len(video_list)}] Begin to segmentate video {video_name}.') + video_dir = os.path.join(args.data_path, "JPEGImages/480p/", video_name) + frame_list = read_frame_list(video_dir) + seg_path = frame_list[0].replace("JPEGImages", "Annotations").replace("jpg", "png") + first_seg, seg_ori = read_seg(seg_path, args.patch_size) + eval_video_tracking_davis(args, model, frame_list, video_dir, first_seg, seg_ori, color_palette) diff --git a/PuzzleTuning/Counterpart PreTrain Methods/dino-main/hubconf.py b/PuzzleTuning/Counterpart PreTrain Methods/dino-main/hubconf.py new file mode 100644 index 0000000000000000000000000000000000000000..3709271ed2b52bb86fbeb70fc02bc47d1add207e --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/dino-main/hubconf.py @@ -0,0 +1,151 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import torch +from torchvision.models.resnet import resnet50 + +import vision_transformer as vits + +dependencies = ["torch", "torchvision"] + + +def dino_vits16(pretrained=True, **kwargs): + """ + ViT-Small/16x16 pre-trained with DINO. + Achieves 74.5% top-1 accuracy on ImageNet with k-NN classification. + """ + model = vits.__dict__["vit_small"](patch_size=16, num_classes=0, **kwargs) + if pretrained: + state_dict = torch.hub.load_state_dict_from_url( + url="https://dl.fbaipublicfiles.com/dino/dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth", + map_location="cpu", + ) + model.load_state_dict(state_dict, strict=True) + return model + + +def dino_vits8(pretrained=True, **kwargs): + """ + ViT-Small/8x8 pre-trained with DINO. + Achieves 78.3% top-1 accuracy on ImageNet with k-NN classification. + """ + model = vits.__dict__["vit_small"](patch_size=8, num_classes=0, **kwargs) + if pretrained: + state_dict = torch.hub.load_state_dict_from_url( + url="https://dl.fbaipublicfiles.com/dino/dino_deitsmall8_pretrain/dino_deitsmall8_pretrain.pth", + map_location="cpu", + ) + model.load_state_dict(state_dict, strict=True) + return model + + +def dino_vitb16(pretrained=True, **kwargs): + """ + ViT-Base/16x16 pre-trained with DINO. + Achieves 76.1% top-1 accuracy on ImageNet with k-NN classification. 
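    Hypothetical usage through torch.hub (the upstream repo path is shown for
    illustration only; shapes assume the default 224 px input):

        model = torch.hub.load('facebookresearch/dino:main', 'dino_vitb16')
        feats = model(torch.randn(1, 3, 224, 224))   # (1, 768) CLS embedding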
+ """ + model = vits.__dict__["vit_base"](patch_size=16, num_classes=0, **kwargs) + if pretrained: + state_dict = torch.hub.load_state_dict_from_url( + url="https://dl.fbaipublicfiles.com/dino/dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth", + map_location="cpu", + ) + model.load_state_dict(state_dict, strict=True) + return model + + +def dino_vitb8(pretrained=True, **kwargs): + """ + ViT-Base/8x8 pre-trained with DINO. + Achieves 77.4% top-1 accuracy on ImageNet with k-NN classification. + """ + model = vits.__dict__["vit_base"](patch_size=8, num_classes=0, **kwargs) + if pretrained: + state_dict = torch.hub.load_state_dict_from_url( + url="https://dl.fbaipublicfiles.com/dino/dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth", + map_location="cpu", + ) + model.load_state_dict(state_dict, strict=True) + return model + + +def dino_resnet50(pretrained=True, **kwargs): + """ + ResNet-50 pre-trained with DINO. + Achieves 75.3% top-1 accuracy on ImageNet linear evaluation benchmark (requires to train `fc`). + """ + model = resnet50(pretrained=False, **kwargs) + model.fc = torch.nn.Identity() + if pretrained: + state_dict = torch.hub.load_state_dict_from_url( + url="https://dl.fbaipublicfiles.com/dino/dino_resnet50_pretrain/dino_resnet50_pretrain.pth", + map_location="cpu", + ) + model.load_state_dict(state_dict, strict=False) + return model + + +def dino_xcit_small_12_p16(pretrained=True, **kwargs): + """ + XCiT-Small-12/16 pre-trained with DINO. + """ + model = torch.hub.load('facebookresearch/xcit:main', "xcit_small_12_p16", num_classes=0, **kwargs) + if pretrained: + state_dict = torch.hub.load_state_dict_from_url( + url="https://dl.fbaipublicfiles.com/dino/dino_xcit_small_12_p16_pretrain/dino_xcit_small_12_p16_pretrain.pth", + map_location="cpu", + ) + model.load_state_dict(state_dict, strict=True) + return model + + +def dino_xcit_small_12_p8(pretrained=True, **kwargs): + """ + XCiT-Small-12/8 pre-trained with DINO. + """ + model = torch.hub.load('facebookresearch/xcit:main', "xcit_small_12_p8", num_classes=0, **kwargs) + if pretrained: + state_dict = torch.hub.load_state_dict_from_url( + url="https://dl.fbaipublicfiles.com/dino/dino_xcit_small_12_p8_pretrain/dino_xcit_small_12_p8_pretrain.pth", + map_location="cpu", + ) + model.load_state_dict(state_dict, strict=True) + return model + + +def dino_xcit_medium_24_p16(pretrained=True, **kwargs): + """ + XCiT-Medium-24/16 pre-trained with DINO. + """ + model = torch.hub.load('facebookresearch/xcit:main', "xcit_medium_24_p16", num_classes=0, **kwargs) + if pretrained: + state_dict = torch.hub.load_state_dict_from_url( + url="https://dl.fbaipublicfiles.com/dino/dino_xcit_medium_24_p16_pretrain/dino_xcit_medium_24_p16_pretrain.pth", + map_location="cpu", + ) + model.load_state_dict(state_dict, strict=True) + return model + + +def dino_xcit_medium_24_p8(pretrained=True, **kwargs): + """ + XCiT-Medium-24/8 pre-trained with DINO. 
+ """ + model = torch.hub.load('facebookresearch/xcit:main', "xcit_medium_24_p8", num_classes=0, **kwargs) + if pretrained: + state_dict = torch.hub.load_state_dict_from_url( + url="https://dl.fbaipublicfiles.com/dino/dino_xcit_medium_24_p8_pretrain/dino_xcit_medium_24_p8_pretrain.pth", + map_location="cpu", + ) + model.load_state_dict(state_dict, strict=True) + return model diff --git a/PuzzleTuning/Counterpart PreTrain Methods/dino-main/main_dino.py b/PuzzleTuning/Counterpart PreTrain Methods/dino-main/main_dino.py new file mode 100644 index 0000000000000000000000000000000000000000..e04ec9dc125423c4bb46779e7b27c181da378844 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/dino-main/main_dino.py @@ -0,0 +1,515 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse +import os +import sys +import datetime +import time +import math +import json +from pathlib import Path + +import numpy as np +from PIL import Image +import torch +import torch.nn as nn +import torch.distributed as dist +import torch.backends.cudnn as cudnn +import torch.nn.functional as F +from torchvision import datasets, transforms +from torchvision import models as torchvision_models + +import utils +import vision_transformer as vits +from vision_transformer import DINOHead + +torchvision_archs = sorted(name for name in torchvision_models.__dict__ + if name.islower() and not name.startswith("__") + and callable(torchvision_models.__dict__[name])) + +def get_args_parser(): + parser = argparse.ArgumentParser('DINO', add_help=False) + + # Model parameters + parser.add_argument('--arch', default='vit_base', type=str, + help="""Name of architecture to train. For quick experiments with ViTs, + we recommend using vit_tiny or vit_small.""") + parser.add_argument('--patch_size', default=16, type=int, help="""Size in pixels + of input square patches - default 16 (for 16x16 patches). Using smaller + values leads to better performance but requires more memory. Applies only + for ViTs (vit_tiny, vit_small and vit_base). If <16, we recommend disabling + mixed precision training (--use_fp16 false) to avoid unstabilities.""") + parser.add_argument('--input_size', default=224, type=int) + parser.add_argument('--out_dim', default=65536, type=int, help="""Dimensionality of + the DINO head output. For complex and large datasets large values (like 65k) work well.""") + parser.add_argument('--norm_last_layer', default=True, type=utils.bool_flag, + help="""Whether or not to weight normalize the last layer of the DINO head. + Not normalizing leads to better performance but can make the training unstable. + In our experiments, we typically set this paramater to False with vit_small and True with vit_base.""") + parser.add_argument('--momentum_teacher', default=0.996, type=float, help="""Base EMA + parameter for teacher update. The value is increased to 1 during training with cosine schedule. 
+ We recommend setting a higher value with small batches: for example use 0.9995 with batch size of 256.""") + parser.add_argument('--use_bn_in_head', default=False, type=utils.bool_flag, + help="Whether to use batch normalizations in projection head (Default: False)") + + # Temperature teacher parameters + parser.add_argument('--warmup_teacher_temp', default=0.04, type=float, + help="""Initial value for the teacher temperature: 0.04 works well in most cases. + Try decreasing it if the training loss does not decrease.""") + parser.add_argument('--teacher_temp', default=0.04, type=float, help="""Final value (after linear warmup) + of the teacher temperature. For most experiments, anything above 0.07 is unstable. We recommend + starting with the default value of 0.04 and increase this slightly if needed.""") + parser.add_argument('--warmup_teacher_temp_epochs', default=0, type=int, + help='Number of warmup epochs for the teacher temperature (Default: 30).') + + # Training/Optimization parameters + parser.add_argument('--use_fp16', type=utils.bool_flag, default=True, help="""Whether or not + to use half precision for training. Improves training time and memory requirements, + but can provoke instability and slight decay of performance. We recommend disabling + mixed precision if the loss is unstable, if reducing the patch size or if training with bigger ViTs.""") + parser.add_argument('--weight_decay', type=float, default=0.04, help="""Initial value of the + weight decay. With ViT, a smaller value at the beginning of training works well.""") + parser.add_argument('--weight_decay_end', type=float, default=0.4, help="""Final value of the + weight decay. We use a cosine schedule for WD and using a larger decay by + the end of training improves performance for ViTs.""") + parser.add_argument('--clip_grad', type=float, default=3.0, help="""Maximal parameter + gradient norm if using gradient clipping. Clipping with norm .3 ~ 1.0 can + help optimization for larger ViT architectures. 0 for disabling.""") + parser.add_argument('--batch_size_per_gpu', default=512, type=int, + help='Per-GPU batch-size : number of distinct images loaded on one GPU.') + parser.add_argument('--epochs', default=150, type=int, help='Number of epochs of training.') + parser.add_argument('--freeze_last_layer', default=1, type=int, help="""Number of epochs + during which we keep the output layer fixed. Typically doing so during + the first epoch helps training. Try increasing this value if the loss does not decrease.""") + parser.add_argument("--lr", default=1.5e-4, type=float, help="""Learning rate at the end of + linear warmup (highest LR used during training). The learning rate is linearly scaled + with the batch size, and specified here for a reference batch size of 512.""") + parser.add_argument("--warmup_epochs", default=20, type=int, + help="Number of epochs for the linear learning-rate warm up.") + parser.add_argument('--min_lr', type=float, default=1e-6, help="""Target LR at the + end of optimization. We use a cosine LR schedule with linear warmup.""") + parser.add_argument('--optimizer', default='adamw', type=str, + choices=['adamw', 'sgd', 'lars'], help="""Type of optimizer. We recommend using adamw with ViTs.""") + parser.add_argument('--drop_path_rate', type=float, default=0.1, help="stochastic depth rate") + + # Multi-crop parameters + parser.add_argument('--global_crops_scale', type=float, nargs='+', default=(0.4, 1.), + help="""Scale range of the cropped image before resizing, relatively to the origin image. 
+ Used for large global view cropping. When disabling multi-crop (--local_crops_number 0), we + recommand using a wider range of scale ("--global_crops_scale 0.14 1." for example)""") + parser.add_argument('--local_crops_number', type=int, default=8, help="""Number of small + local views to generate. Set this parameter to 0 to disable multi-crop training. + When disabling multi-crop we recommend to use "--global_crops_scale 0.14 1." """) + parser.add_argument('--local_crops_scale', type=float, nargs='+', default=(0.05, 0.4), + help="""Scale range of the cropped image before resizing, relatively to the origin image. + Used for small local view cropping of multi-crop.""") + + # Misc + parser.add_argument('--data_path', default='/root/autodl-tmp/All', type=str, + help='Please specify path to the ImageNet training data.') + parser.add_argument('--basic_state_dict', default='/root/autodl-tmp/ViT_b16_224_Imagenet.pth', type=str, + help='Load in pretrained or un-pretrained model pth') + parser.add_argument('--output_dir', default="/home/CPIA/saved_models/DINO", type=str, help='Path to save logs and checkpoints.') + parser.add_argument('--saveckp_freq', default=50, type=int, help='Save checkpoint every x epochs.') + parser.add_argument('--seed', default=0, type=int, help='Random seed.') + parser.add_argument('--num_workers', default=32, type=int, help='Number of data loading workers per GPU.') + parser.add_argument("--dist_url", default="env://", type=str, help="""url used to set up + distributed training; see https://pytorch.org/docs/stable/distributed.html""") + parser.add_argument("--local_rank", default=0, type=int, help="Please ignore and do not set this argument.") + return parser + + +def train_dino(args): + utils.init_distributed_mode(args) + utils.fix_random_seeds(args.seed) + print('ok') + print("git:\n {}\n".format(utils.get_sha())) + print("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items()))) + cudnn.benchmark = True + + # ============ preparing data ... ============ + transform = DataAugmentationDINO( + args.global_crops_scale, + args.local_crops_scale, + args.local_crops_number, + ) + dataset = datasets.ImageFolder(args.data_path, transform=transform) + sampler = torch.utils.data.DistributedSampler(dataset, shuffle=True) + data_loader = torch.utils.data.DataLoader( + dataset, + sampler=sampler, + batch_size=args.batch_size_per_gpu, + num_workers=args.num_workers, + pin_memory=True, + drop_last=True, + ) + print(f"Data loaded: there are {len(dataset)} images.") + + # ============ building student and teacher networks ... ============ + # we changed the name DeiT-S for ViT-S to avoid confusions + args.arch = args.arch.replace("deit", "vit") + # if the network is a Vision Transformer (i.e. 
vit_tiny, vit_small, vit_base)
+    if args.arch in vits.__dict__.keys():
+        student = vits.__dict__[args.arch](
+            patch_size=args.patch_size,
+            drop_path_rate=args.drop_path_rate,  # stochastic depth
+        )
+        if args.basic_state_dict is not None:  # Transfer-learning
+            try:
+                basic_state_dict = torch.load(args.basic_state_dict)
+                if 'model' in basic_state_dict:
+                    basic_state_dict = basic_state_dict['model']
+                else:
+                    pass
+                student.load_state_dict(basic_state_dict, False)
+
+            except Exception:
+                print('error in args.basic_state_dict:', args.basic_state_dict)
+                print('Student PreTuning Restart')  # the weights could not be loaded
+
+            else:
+                print('Student PreTuning with Transfer-learning with:', args.basic_state_dict)
+
+        else:
+            print('Student PreTuning Restart')
+        teacher = vits.__dict__[args.arch](patch_size=args.patch_size)
+        if args.basic_state_dict is not None:  # Transfer-learning
+            try:
+                basic_state_dict = torch.load(args.basic_state_dict)
+                if 'model' in basic_state_dict:
+                    basic_state_dict = basic_state_dict['model']
+                else:
+                    pass
+                teacher.load_state_dict(basic_state_dict, False)
+
+            except Exception:
+                print('error in args.basic_state_dict:', args.basic_state_dict)
+                print('Teacher PreTuning Restart')  # the weights could not be loaded
+
+            else:
+                print('Teacher PreTuning with Transfer-learning with:', args.basic_state_dict)
+
+        else:
+            print('Teacher PreTuning Restart')
+        embed_dim = student.embed_dim
+    # if the network is a XCiT
+    """elif args.arch in torch.hub.list("facebookresearch/xcit:main"):
+        student = torch.hub.load('facebookresearch/xcit:main', args.arch,
+                                 pretrained=False, drop_path_rate=args.drop_path_rate)
+        teacher = torch.hub.load('facebookresearch/xcit:main', args.arch, pretrained=False)
+        embed_dim = student.embed_dim
+    # otherwise, we check if the architecture is in torchvision models
+    elif args.arch in torchvision_models.__dict__.keys():
+        student = torchvision_models.__dict__[args.arch]()
+        teacher = torchvision_models.__dict__[args.arch]()
+        embed_dim = student.fc.weight.shape[1]
+    else:
+        print(f"Unknown architecture: {args.arch}")"""
+
+    # multi-crop wrapper handles forward with inputs of different resolutions
+    student = utils.MultiCropWrapper(student, DINOHead(
+        embed_dim,
+        args.out_dim,
+        use_bn=args.use_bn_in_head,
+        norm_last_layer=args.norm_last_layer,
+    ))
+    teacher = utils.MultiCropWrapper(
+        teacher,
+        DINOHead(embed_dim, args.out_dim, args.use_bn_in_head),
+    )
+    # move networks to gpu
+    student, teacher = student.cuda(), teacher.cuda()
+    # synchronize batch norms (if any)
+    if utils.has_batchnorms(student):
+        student = nn.SyncBatchNorm.convert_sync_batchnorm(student)
+        teacher = nn.SyncBatchNorm.convert_sync_batchnorm(teacher)
+
+        # we need DDP wrapper to have synchro batch norms working...
+        teacher = nn.parallel.DistributedDataParallel(teacher, device_ids=[args.gpu])
+        teacher_without_ddp = teacher.module
+    else:
+        # teacher_without_ddp and teacher are the same thing
+        teacher_without_ddp = teacher
+    student = nn.parallel.DistributedDataParallel(student, device_ids=[args.gpu])
+    # teacher and student start with the same weights
+    teacher_without_ddp.load_state_dict(student.module.state_dict())
+    # there is no backpropagation through the teacher, so no need for gradients
+    for p in teacher.parameters():
+        p.requires_grad = False
+    print(f"Student and Teacher are built: they are both {args.arch} networks.")
+
+    # ============ preparing loss ...
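    # Note: the positional `False` in `load_state_dict(basic_state_dict, False)`
    # above is `strict=False`, so mismatched keys are skipped rather than raised.
    # A hedged way to inspect what actually transferred (illustrative, not part
    # of the original script):
    #     result = student.load_state_dict(basic_state_dict, strict=False)
    #     print('missing:', result.missing_keys, 'unexpected:', result.unexpected_keys)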
============ + dino_loss = DINOLoss( + args.out_dim, + args.local_crops_number + 2, # total number of crops = 2 global crops + local_crops_number + args.warmup_teacher_temp, + args.teacher_temp, + args.warmup_teacher_temp_epochs, + args.epochs, + ).cuda() + + # ============ preparing optimizer ... ============ + params_groups = utils.get_params_groups(student) + if args.optimizer == "adamw": + optimizer = torch.optim.AdamW(params_groups) # to use with ViTs + elif args.optimizer == "sgd": + optimizer = torch.optim.SGD(params_groups, lr=0, momentum=0.9) # lr is set by scheduler + elif args.optimizer == "lars": + optimizer = utils.LARS(params_groups) # to use with convnet and large batches + # for mixed precision training + fp16_scaler = None + if args.use_fp16: + fp16_scaler = torch.cuda.amp.GradScaler() + + # ============ init schedulers ... ============ + lr_schedule = utils.cosine_scheduler( + args.lr * (args.batch_size_per_gpu * utils.get_world_size()) / 256., # linear scaling rule + args.min_lr, + args.epochs, len(data_loader), + warmup_epochs=args.warmup_epochs, + ) + wd_schedule = utils.cosine_scheduler( + args.weight_decay, + args.weight_decay_end, + args.epochs, len(data_loader), + ) + # momentum parameter is increased to 1. during training with a cosine schedule + momentum_schedule = utils.cosine_scheduler(args.momentum_teacher, 1, + args.epochs, len(data_loader)) + print(f"Loss, optimizer and schedulers ready.") + + # ============ optionally resume training ... ============ + to_restore = {"epoch": 0} + utils.restart_from_checkpoint( + os.path.join(args.output_dir, "checkpoint.pth"), + run_variables=to_restore, + student=student, + teacher=teacher, + optimizer=optimizer, + fp16_scaler=fp16_scaler, + dino_loss=dino_loss, + ) + start_epoch = to_restore["epoch"] + + start_time = time.time() + print("Starting DINO training !") + for epoch in range(start_epoch, args.epochs): + data_loader.sampler.set_epoch(epoch) + + # ============ training one epoch of DINO ... ============ + train_stats = train_one_epoch(student, teacher, teacher_without_ddp, dino_loss, + data_loader, optimizer, lr_schedule, wd_schedule, momentum_schedule, + epoch, fp16_scaler, args) + + # ============ writing logs ... 
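    # Worked example of the linear scaling rule above: the implementation divides
    # by a reference batch of 256 (the --lr help text says 512, but the code uses
    # 256), so with 512 images per GPU on 4 GPUs the peak learning rate is
    #     1.5e-4 * (512 * 4) / 256 = 1.2e-3,
    # reached after --warmup_epochs and decayed to --min_lr on the cosine schedule.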
============ + save_dict = { + 'student': student.state_dict(), + 'teacher': teacher.state_dict(), + 'optimizer': optimizer.state_dict(), + 'epoch': epoch + 1, + 'args': args, + 'dino_loss': dino_loss.state_dict(), + } + if fp16_scaler is not None: + save_dict['fp16_scaler'] = fp16_scaler.state_dict() + utils.save_on_master(save_dict, os.path.join(args.output_dir, 'checkpoint.pth')) + if args.saveckp_freq and (epoch % args.saveckp_freq == 0 or epoch + 1 == args.epochs): + initial_setting = os.path.split(args.basic_state_dict)[1].split('.')[0] # 'ViT_b16_224_Imagenet' + dataset_using = os.path.split(args.data_path)[1] + utils.save_on_master(save_dict, os.path.join(args.output_dir, f'dino_'+initial_setting + '_' + dataset_using+f'_checkpoint{epoch:04}.pth')) + + + + + log_stats = {**{f'train_{k}': v for k, v in train_stats.items()}, + 'epoch': epoch} + if utils.is_main_process(): + with (Path(args.output_dir) / "log.txt").open("a") as f: + f.write(json.dumps(log_stats) + "\n") + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('Training time {}'.format(total_time_str)) + + +def train_one_epoch(student, teacher, teacher_without_ddp, dino_loss, data_loader, + optimizer, lr_schedule, wd_schedule, momentum_schedule,epoch, + fp16_scaler, args): + metric_logger = utils.MetricLogger(delimiter=" ") + header = 'Epoch: [{}/{}]'.format(epoch, args.epochs) + for it, (images, _) in enumerate(metric_logger.log_every(data_loader, 10, header)): + # update weight decay and learning rate according to their schedule + it = len(data_loader) * epoch + it # global training iteration + for i, param_group in enumerate(optimizer.param_groups): + param_group["lr"] = lr_schedule[it] + if i == 0: # only the first group is regularized + param_group["weight_decay"] = wd_schedule[it] + + # move images to gpu + images = [im.cuda(non_blocking=True) for im in images] + # teacher and student forward passes + compute dino loss + with torch.cuda.amp.autocast(fp16_scaler is not None): + teacher_output = teacher(images[:2]) # only the 2 global views pass through the teacher + student_output = student(images) + loss = dino_loss(student_output, teacher_output, epoch) + + if not math.isfinite(loss.item()): + print("Loss is {}, stopping training".format(loss.item()), force=True) + sys.exit(1) + + # student update + optimizer.zero_grad() + param_norms = None + if fp16_scaler is None: + loss.backward() + if args.clip_grad: + param_norms = utils.clip_gradients(student, args.clip_grad) + utils.cancel_gradients_last_layer(epoch, student, + args.freeze_last_layer) + optimizer.step() + else: + fp16_scaler.scale(loss).backward() + if args.clip_grad: + fp16_scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place + param_norms = utils.clip_gradients(student, args.clip_grad) + utils.cancel_gradients_last_layer(epoch, student, + args.freeze_last_layer) + fp16_scaler.step(optimizer) + fp16_scaler.update() + + # EMA update for the teacher + with torch.no_grad(): + m = momentum_schedule[it] # momentum parameter + for param_q, param_k in zip(student.module.parameters(), teacher_without_ddp.parameters()): + param_k.data.mul_(m).add_((1 - m) * param_q.detach().data) + + # logging + torch.cuda.synchronize() + metric_logger.update(loss=loss.item()) + metric_logger.update(lr=optimizer.param_groups[0]["lr"]) + metric_logger.update(wd=optimizer.param_groups[0]["weight_decay"]) + # gather the stats from all processes + 
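        # Aside: the EMA step above implements, per parameter,
        #     teacher = m * teacher + (1 - m) * student,
        # with m ramped from args.momentum_teacher (default 0.996) toward 1.0 by
        # the cosine momentum_schedule, so the teacher tracks the student quickly
        # early in training and is nearly frozen by the end.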
metric_logger.synchronize_between_processes() + print("Averaged stats:", metric_logger) + return {k: meter.global_avg for k, meter in metric_logger.meters.items()} + + +class DINOLoss(nn.Module): + def __init__(self, out_dim, ncrops, warmup_teacher_temp, teacher_temp, + warmup_teacher_temp_epochs, nepochs, student_temp=0.1, + center_momentum=0.9): + super().__init__() + self.student_temp = student_temp + self.center_momentum = center_momentum + self.ncrops = ncrops + self.register_buffer("center", torch.zeros(1, out_dim)) + # we apply a warm up for the teacher temperature because + # a too high temperature makes the training instable at the beginning + self.teacher_temp_schedule = np.concatenate(( + np.linspace(warmup_teacher_temp, + teacher_temp, warmup_teacher_temp_epochs), + np.ones(nepochs - warmup_teacher_temp_epochs) * teacher_temp + )) + + def forward(self, student_output, teacher_output, epoch): + """ + Cross-entropy between softmax outputs of the teacher and student networks. + """ + student_out = student_output / self.student_temp + student_out = student_out.chunk(self.ncrops) + + # teacher centering and sharpening + temp = self.teacher_temp_schedule[epoch] + teacher_out = F.softmax((teacher_output - self.center) / temp, dim=-1) + teacher_out = teacher_out.detach().chunk(2) + + total_loss = 0 + n_loss_terms = 0 + for iq, q in enumerate(teacher_out): + for v in range(len(student_out)): + if v == iq: + # we skip cases where student and teacher operate on the same view + continue + loss = torch.sum(-q * F.log_softmax(student_out[v], dim=-1), dim=-1) + total_loss += loss.mean() + n_loss_terms += 1 + total_loss /= n_loss_terms + self.update_center(teacher_output) + return total_loss + + @torch.no_grad() + def update_center(self, teacher_output): + """ + Update center used for teacher output. 
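        A hedged sketch of the rule (shapes illustrative; the all_reduce step,
        which turns the batch mean into a global mean, is omitted):

            batch_center = teacher_output.mean(dim=0, keepdim=True)
            self.center = self.center * self.center_momentum + batch_center * (1 - self.center_momentum)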
+ """ + batch_center = torch.sum(teacher_output, dim=0, keepdim=True) + dist.all_reduce(batch_center) + batch_center = batch_center / (len(teacher_output) * dist.get_world_size()) + + # ema update + self.center = self.center * self.center_momentum + batch_center * (1 - self.center_momentum) + + +class DataAugmentationDINO(object): + def __init__(self, global_crops_scale, local_crops_scale, local_crops_number): + flip_and_color_jitter = transforms.Compose([ + transforms.RandomHorizontalFlip(p=0.5), + transforms.RandomApply( + [transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.2, hue=0.1)], + p=0.8 + ), + transforms.RandomGrayscale(p=0.2), + ]) + normalize = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), + ]) + + # first global crop + self.global_transfo1 = transforms.Compose([ + transforms.RandomResizedCrop(224, scale=global_crops_scale, interpolation=Image.BICUBIC), + flip_and_color_jitter, + utils.GaussianBlur(1.0), + normalize, + ]) + # second global crop + self.global_transfo2 = transforms.Compose([ + transforms.RandomResizedCrop(224, scale=global_crops_scale, interpolation=Image.BICUBIC), + flip_and_color_jitter, + utils.GaussianBlur(0.1), + utils.Solarization(0.2), + normalize, + ]) + # transformation for the local small crops + self.local_crops_number = local_crops_number + self.local_transfo = transforms.Compose([ + transforms.RandomResizedCrop(96, scale=local_crops_scale, interpolation=Image.BICUBIC), + flip_and_color_jitter, + utils.GaussianBlur(p=0.5), + normalize, + ]) + + def __call__(self, image): + crops = [] + crops.append(self.global_transfo1(image)) + crops.append(self.global_transfo2(image)) + for _ in range(self.local_crops_number): + crops.append(self.local_transfo(image)) + return crops + + +if __name__ == '__main__': + parser = argparse.ArgumentParser('DINO', parents=[get_args_parser()]) + args = parser.parse_args() + Path(args.output_dir).mkdir(parents=True, exist_ok=True) + train_dino(args) diff --git a/PuzzleTuning/Counterpart PreTrain Methods/dino-main/run_with_submitit.py b/PuzzleTuning/Counterpart PreTrain Methods/dino-main/run_with_submitit.py new file mode 100644 index 0000000000000000000000000000000000000000..33d4116f2ff512b39d0cec5c936f999df1ac80fe --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/dino-main/run_with_submitit.py @@ -0,0 +1,132 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +A script to run multinode training with submitit. 
+Almost copy-paste from https://github.com/facebookresearch/deit/blob/main/run_with_submitit.py +""" +import argparse +import os +import uuid +from pathlib import Path + +import main_dino +import submitit + + +def parse_args(): + parser = argparse.ArgumentParser("Submitit for DINO", parents=[main_dino.get_args_parser()]) + parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node") + parser.add_argument("--nodes", default=2, type=int, help="Number of nodes to request") + parser.add_argument("--timeout", default=2800, type=int, help="Duration of the job") + + parser.add_argument("--partition", default="learnfair", type=str, help="Partition where to submit") + parser.add_argument("--use_volta32", action='store_true', help="Big models? Use this") + parser.add_argument('--comment', default="", type=str, + help='Comment to pass to scheduler, e.g. priority message') + return parser.parse_args() + + +def get_shared_folder() -> Path: + user = os.getenv("USER") + if Path("/checkpoint/").is_dir(): + p = Path(f"/checkpoint/{user}/experiments") + p.mkdir(exist_ok=True) + return p + raise RuntimeError("No shared folder available") + + +def get_init_file(): + # Init file must not exist, but it's parent dir must exist. + os.makedirs(str(get_shared_folder()), exist_ok=True) + init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init" + if init_file.exists(): + os.remove(str(init_file)) + return init_file + + +class Trainer(object): + def __init__(self, args): + self.args = args + + def __call__(self): + import main_dino + + self._setup_gpu_args() + main_dino.train_dino(self.args) + + def checkpoint(self): + import os + import submitit + + self.args.dist_url = get_init_file().as_uri() + print("Requeuing ", self.args) + empty_trainer = type(self)(self.args) + return submitit.helpers.DelayedSubmission(empty_trainer) + + def _setup_gpu_args(self): + import submitit + from pathlib import Path + + job_env = submitit.JobEnvironment() + self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id))) + self.args.gpu = job_env.local_rank + self.args.rank = job_env.global_rank + self.args.world_size = job_env.num_tasks + print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}") + + +def main(): + args = parse_args() + if args.output_dir == "": + args.output_dir = get_shared_folder() / "%j" + Path(args.output_dir).mkdir(parents=True, exist_ok=True) + executor = submitit.AutoExecutor(folder=args.output_dir, slurm_max_num_timeout=30) + + num_gpus_per_node = args.ngpus + nodes = args.nodes + timeout_min = args.timeout + + partition = args.partition + kwargs = {} + if args.use_volta32: + kwargs['slurm_constraint'] = 'volta32gb' + if args.comment: + kwargs['slurm_comment'] = args.comment + + executor.update_parameters( + mem_gb=40 * num_gpus_per_node, + gpus_per_node=num_gpus_per_node, + tasks_per_node=num_gpus_per_node, # one task per GPU + cpus_per_task=10, + nodes=nodes, + timeout_min=timeout_min, # max is 60 * 72 + # Below are cluster dependent parameters + slurm_partition=partition, + slurm_signal_delay_s=120, + **kwargs + ) + + executor.update_parameters(name="dino") + + args.dist_url = get_init_file().as_uri() + + trainer = Trainer(args) + job = executor.submit(trainer) + + print(f"Submitted job_id: {job.job_id}") + print(f"Logs and checkpoints will be saved at: {args.output_dir}") + + +if __name__ == "__main__": + main() diff --git a/PuzzleTuning/Counterpart PreTrain Methods/dino-main/utils.py b/PuzzleTuning/Counterpart 
PreTrain Methods/dino-main/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9586250123a125a83ea2679e121b1b0ef8089916 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/dino-main/utils.py @@ -0,0 +1,829 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Misc functions. + +Mostly copy-paste from torchvision references or other public repos like DETR: +https://github.com/facebookresearch/detr/blob/master/util/misc.py +""" +import os +import sys +import time +import math +import random +import datetime +import subprocess +from collections import defaultdict, deque + +import numpy as np +import torch +from torch import nn +import torch.distributed as dist +from PIL import ImageFilter, ImageOps + + +class GaussianBlur(object): + """ + Apply Gaussian Blur to the PIL image. + """ + def __init__(self, p=0.5, radius_min=0.1, radius_max=2.): + self.prob = p + self.radius_min = radius_min + self.radius_max = radius_max + + def __call__(self, img): + do_it = random.random() <= self.prob + if not do_it: + return img + + return img.filter( + ImageFilter.GaussianBlur( + radius=random.uniform(self.radius_min, self.radius_max) + ) + ) + + +class Solarization(object): + """ + Apply Solarization to the PIL image. 
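    Hedged usage sketch of the two PIL-level augmentations defined here
    (probabilities forced to 1 for illustration):

        img = Image.new("RGB", (224, 224), "gray")
        img = GaussianBlur(p=1.0)(img)    # blur radius drawn from U(0.1, 2.0)
        img = Solarization(p=1.0)(img)    # invert pixels above PIL's default threshold (128)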
+ """ + def __init__(self, p): + self.p = p + + def __call__(self, img): + if random.random() < self.p: + return ImageOps.solarize(img) + else: + return img + + +def load_pretrained_weights(model, pretrained_weights, checkpoint_key, model_name, patch_size): + if os.path.isfile(pretrained_weights): + state_dict = torch.load(pretrained_weights, map_location="cpu") + if checkpoint_key is not None and checkpoint_key in state_dict: + print(f"Take key {checkpoint_key} in provided checkpoint dict") + state_dict = state_dict[checkpoint_key] + # remove `module.` prefix + state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()} + # remove `backbone.` prefix induced by multicrop wrapper + state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()} + msg = model.load_state_dict(state_dict, strict=False) + print('Pretrained weights found at {} and loaded with msg: {}'.format(pretrained_weights, msg)) + else: + print("Please use the `--pretrained_weights` argument to indicate the path of the checkpoint to evaluate.") + url = None + if model_name == "vit_small" and patch_size == 16: + url = "dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth" + elif model_name == "vit_small" and patch_size == 8: + url = "dino_deitsmall8_pretrain/dino_deitsmall8_pretrain.pth" + elif model_name == "vit_base" and patch_size == 16: + url = "dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth" + elif model_name == "vit_base" and patch_size == 8: + url = "dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth" + elif model_name == "xcit_small_12_p16": + url = "dino_xcit_small_12_p16_pretrain/dino_xcit_small_12_p16_pretrain.pth" + elif model_name == "xcit_small_12_p8": + url = "dino_xcit_small_12_p8_pretrain/dino_xcit_small_12_p8_pretrain.pth" + elif model_name == "xcit_medium_24_p16": + url = "dino_xcit_medium_24_p16_pretrain/dino_xcit_medium_24_p16_pretrain.pth" + elif model_name == "xcit_medium_24_p8": + url = "dino_xcit_medium_24_p8_pretrain/dino_xcit_medium_24_p8_pretrain.pth" + elif model_name == "resnet50": + url = "dino_resnet50_pretrain/dino_resnet50_pretrain.pth" + if url is not None: + print("Since no pretrained weights have been provided, we load the reference pretrained DINO weights.") + state_dict = torch.hub.load_state_dict_from_url(url="https://dl.fbaipublicfiles.com/dino/" + url) + model.load_state_dict(state_dict, strict=True) + else: + print("There is no reference weights available for this model => We use random weights.") + + +def load_pretrained_linear_weights(linear_classifier, model_name, patch_size): + url = None + if model_name == "vit_small" and patch_size == 16: + url = "dino_deitsmall16_pretrain/dino_deitsmall16_linearweights.pth" + elif model_name == "vit_small" and patch_size == 8: + url = "dino_deitsmall8_pretrain/dino_deitsmall8_linearweights.pth" + elif model_name == "vit_base" and patch_size == 16: + url = "dino_vitbase16_pretrain/dino_vitbase16_linearweights.pth" + elif model_name == "vit_base" and patch_size == 8: + url = "dino_vitbase8_pretrain/dino_vitbase8_linearweights.pth" + elif model_name == "resnet50": + url = "dino_resnet50_pretrain/dino_resnet50_linearweights.pth" + if url is not None: + print("We load the reference pretrained linear weights.") + state_dict = torch.hub.load_state_dict_from_url(url="https://dl.fbaipublicfiles.com/dino/" + url)["state_dict"] + linear_classifier.load_state_dict(state_dict, strict=True) + else: + print("We use random linear weights.") + + +def clip_gradients(model, clip): + norms = [] + for name, p in 
model.named_parameters(): + if p.grad is not None: + param_norm = p.grad.data.norm(2) + norms.append(param_norm.item()) + clip_coef = clip / (param_norm + 1e-6) + if clip_coef < 1: + p.grad.data.mul_(clip_coef) + return norms + + +def cancel_gradients_last_layer(epoch, model, freeze_last_layer): + if epoch >= freeze_last_layer: + return + for n, p in model.named_parameters(): + if "last_layer" in n: + p.grad = None + + +def restart_from_checkpoint(ckp_path, run_variables=None, **kwargs): + """ + Re-start from checkpoint + """ + if not os.path.isfile(ckp_path): + return + print("Found checkpoint at {}".format(ckp_path)) + + # open checkpoint file + checkpoint = torch.load(ckp_path, map_location="cpu") + + # key is what to look for in the checkpoint file + # value is the object to load + # example: {'state_dict': model} + for key, value in kwargs.items(): + if key in checkpoint and value is not None: + try: + msg = value.load_state_dict(checkpoint[key], strict=False) + print("=> loaded '{}' from checkpoint '{}' with msg {}".format(key, ckp_path, msg)) + except TypeError: + try: + msg = value.load_state_dict(checkpoint[key]) + print("=> loaded '{}' from checkpoint: '{}'".format(key, ckp_path)) + except ValueError: + print("=> failed to load '{}' from checkpoint: '{}'".format(key, ckp_path)) + else: + print("=> key '{}' not found in checkpoint: '{}'".format(key, ckp_path)) + + # re load variable important for the run + if run_variables is not None: + for var_name in run_variables: + if var_name in checkpoint: + run_variables[var_name] = checkpoint[var_name] + + +def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0, start_warmup_value=0): + warmup_schedule = np.array([]) + warmup_iters = warmup_epochs * niter_per_ep + if warmup_epochs > 0: + warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters) + + iters = np.arange(epochs * niter_per_ep - warmup_iters) + schedule = final_value + 0.5 * (base_value - final_value) * (1 + np.cos(np.pi * iters / len(iters))) + + schedule = np.concatenate((warmup_schedule, schedule)) + assert len(schedule) == epochs * niter_per_ep + return schedule + + +def bool_flag(s): + """ + Parse boolean arguments from the command line. + """ + FALSY_STRINGS = {"off", "false", "0"} + TRUTHY_STRINGS = {"on", "true", "1"} + if s.lower() in FALSY_STRINGS: + return False + elif s.lower() in TRUTHY_STRINGS: + return True + else: + raise argparse.ArgumentTypeError("invalid value for a boolean flag") + + +def fix_random_seeds(seed=31): + """ + Fix random seeds. + """ + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + np.random.seed(seed) + + +class SmoothedValue(object): + """Track a series of values and provide access to smoothed values over a + window or the global series average. + """ + + def __init__(self, window_size=20, fmt=None): + if fmt is None: + fmt = "{median:.6f} ({global_avg:.6f})" + self.deque = deque(maxlen=window_size) + self.total = 0.0 + self.count = 0 + self.fmt = fmt + + def update(self, value, n=1): + self.deque.append(value) + self.count += n + self.total += value * n + + def synchronize_between_processes(self): + """ + Warning: does not synchronize the deque! 
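        Only `count` and `total` are all-reduced, so after this call `global_avg`
        becomes a cross-process statistic, while `median` and `avg` (computed
        from the local deque) remain process-local.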
+ """ + if not is_dist_avail_and_initialized(): + return + t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') + dist.barrier() + dist.all_reduce(t) + t = t.tolist() + self.count = int(t[0]) + self.total = t[1] + + @property + def median(self): + d = torch.tensor(list(self.deque)) + return d.median().item() + + @property + def avg(self): + d = torch.tensor(list(self.deque), dtype=torch.float32) + return d.mean().item() + + @property + def global_avg(self): + return self.total / self.count + + @property + def max(self): + return max(self.deque) + + @property + def value(self): + return self.deque[-1] + + def __str__(self): + return self.fmt.format( + median=self.median, + avg=self.avg, + global_avg=self.global_avg, + max=self.max, + value=self.value) + + +def reduce_dict(input_dict, average=True): + """ + Args: + input_dict (dict): all the values will be reduced + average (bool): whether to do average or sum + Reduce the values in the dictionary from all processes so that all processes + have the averaged results. Returns a dict with the same fields as + input_dict, after reduction. + """ + world_size = get_world_size() + if world_size < 2: + return input_dict + with torch.no_grad(): + names = [] + values = [] + # sort the keys so that they are consistent across processes + for k in sorted(input_dict.keys()): + names.append(k) + values.append(input_dict[k]) + values = torch.stack(values, dim=0) + dist.all_reduce(values) + if average: + values /= world_size + reduced_dict = {k: v for k, v in zip(names, values)} + return reduced_dict + + +class MetricLogger(object): + def __init__(self, delimiter="\t"): + self.meters = defaultdict(SmoothedValue) + self.delimiter = delimiter + + def update(self, **kwargs): + for k, v in kwargs.items(): + if isinstance(v, torch.Tensor): + v = v.item() + assert isinstance(v, (float, int)) + self.meters[k].update(v) + + def __getattr__(self, attr): + if attr in self.meters: + return self.meters[attr] + if attr in self.__dict__: + return self.__dict__[attr] + raise AttributeError("'{}' object has no attribute '{}'".format( + type(self).__name__, attr)) + + def __str__(self): + loss_str = [] + for name, meter in self.meters.items(): + loss_str.append( + "{}: {}".format(name, str(meter)) + ) + return self.delimiter.join(loss_str) + + def synchronize_between_processes(self): + for meter in self.meters.values(): + meter.synchronize_between_processes() + + def add_meter(self, name, meter): + self.meters[name] = meter + + def log_every(self, iterable, print_freq, header=None): + i = 0 + if not header: + header = '' + start_time = time.time() + end = time.time() + iter_time = SmoothedValue(fmt='{avg:.6f}') + data_time = SmoothedValue(fmt='{avg:.6f}') + space_fmt = ':' + str(len(str(len(iterable)))) + 'd' + if torch.cuda.is_available(): + log_msg = self.delimiter.join([ + header, + '[{0' + space_fmt + '}/{1}]', + 'eta: {eta}', + '{meters}', + 'time: {time}', + 'data: {data}', + 'max mem: {memory:.0f}' + ]) + else: + log_msg = self.delimiter.join([ + header, + '[{0' + space_fmt + '}/{1}]', + 'eta: {eta}', + '{meters}', + 'time: {time}', + 'data: {data}' + ]) + MB = 1024.0 * 1024.0 + for obj in iterable: + data_time.update(time.time() - end) + yield obj + iter_time.update(time.time() - end) + if i % print_freq == 0 or i == len(iterable) - 1: + eta_seconds = iter_time.global_avg * (len(iterable) - i) + eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) + if torch.cuda.is_available(): + print(log_msg.format( + i, len(iterable), 
eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time), + memory=torch.cuda.max_memory_allocated() / MB)) + else: + print(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time))) + i += 1 + end = time.time() + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('{} Total time: {} ({:.6f} s / it)'.format( + header, total_time_str, total_time / len(iterable))) + + +def get_sha(): + cwd = os.path.dirname(os.path.abspath(__file__)) + + def _run(command): + return subprocess.check_output(command, cwd=cwd).decode('ascii').strip() + sha = 'N/A' + diff = "clean" + branch = 'N/A' + try: + sha = _run(['git', 'rev-parse', 'HEAD']) + subprocess.check_output(['git', 'diff'], cwd=cwd) + diff = _run(['git', 'diff-index', 'HEAD']) + diff = "has uncommited changes" if diff else "clean" + branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD']) + except Exception: + pass + message = f"sha: {sha}, status: {diff}, branch: {branch}" + return message + + +def is_dist_avail_and_initialized(): + if not dist.is_available(): + return False + if not dist.is_initialized(): + return False + return True + + +def get_world_size(): + if not is_dist_avail_and_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank(): + if not is_dist_avail_and_initialized(): + return 0 + return dist.get_rank() + + +def is_main_process(): + return get_rank() == 0 + + +def save_on_master(*args, **kwargs): + if is_main_process(): + torch.save(*args, **kwargs) + + +def setup_for_distributed(is_master): + """ + This function disables printing when not in master process + """ + import builtins as __builtin__ + builtin_print = __builtin__.print + + def print(*args, **kwargs): + force = kwargs.pop('force', False) + if is_master or force: + builtin_print(*args, **kwargs) + + __builtin__.print = print + + +def init_distributed_mode(args): + # launched with torch.distributed.launch + if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: + args.rank = int(os.environ["RANK"]) + args.world_size = int(os.environ['WORLD_SIZE']) + args.gpu = int(os.environ['LOCAL_RANK']) + # launched with submitit on a slurm cluster + elif 'SLURM_PROCID' in os.environ: + args.rank = int(os.environ['SLURM_PROCID']) + args.gpu = args.rank % torch.cuda.device_count() + # launched naively with `python main_dino.py` + # we manually add MASTER_ADDR and MASTER_PORT to env variables + elif torch.cuda.is_available(): + print('Will run the code on one GPU.') + args.rank, args.gpu, args.world_size = 0, 0, 1 + os.environ['MASTER_ADDR'] = '127.0.0.1' + os.environ['MASTER_PORT'] = '29500' + else: + print('Does not support training without GPU.') + sys.exit(1) + + dist.init_process_group( + backend="nccl", + init_method=args.dist_url, + world_size=args.world_size, + rank=args.rank, + ) + + torch.cuda.set_device(args.gpu) + print('| distributed init (rank {}): {}'.format( + args.rank, args.dist_url), flush=True) + dist.barrier() + setup_for_distributed(args.rank == 0) + + +def accuracy(output, target, topk=(1,)): + """Computes the accuracy over the k top predictions for the specified values of k""" + maxk = max(topk) + batch_size = target.size(0) + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + correct = pred.eq(target.reshape(1, -1).expand_as(pred)) + return [correct[:k].reshape(-1).float().sum(0) * 100. 
/ batch_size for k in topk] + + +def _no_grad_trunc_normal_(tensor, mean, std, a, b): + # Cut & paste from PyTorch official master until it's in a few official releases - RW + # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf + def norm_cdf(x): + # Computes standard normal cumulative distribution function + return (1. + math.erf(x / math.sqrt(2.))) / 2. + + if (mean < a - 2 * std) or (mean > b + 2 * std): + warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " + "The distribution of values may be incorrect.", + stacklevel=2) + + with torch.no_grad(): + # Values are generated by using a truncated uniform distribution and + # then using the inverse CDF for the normal distribution. + # Get upper and lower cdf values + l = norm_cdf((a - mean) / std) + u = norm_cdf((b - mean) / std) + + # Uniformly fill tensor with values from [l, u], then translate to + # [2l-1, 2u-1]. + tensor.uniform_(2 * l - 1, 2 * u - 1) + + # Use inverse cdf transform for normal distribution to get truncated + # standard normal + tensor.erfinv_() + + # Transform to proper mean, std + tensor.mul_(std * math.sqrt(2.)) + tensor.add_(mean) + + # Clamp to ensure it's in the proper range + tensor.clamp_(min=a, max=b) + return tensor + + +def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.): + # type: (Tensor, float, float, float, float) -> Tensor + return _no_grad_trunc_normal_(tensor, mean, std, a, b) + + +class LARS(torch.optim.Optimizer): + """ + Almost copy-paste from https://github.com/facebookresearch/barlowtwins/blob/main/main.py + """ + def __init__(self, params, lr=0, weight_decay=0, momentum=0.9, eta=0.001, + weight_decay_filter=None, lars_adaptation_filter=None): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum, + eta=eta, weight_decay_filter=weight_decay_filter, + lars_adaptation_filter=lars_adaptation_filter) + super().__init__(params, defaults) + + @torch.no_grad() + def step(self): + for g in self.param_groups: + for p in g['params']: + dp = p.grad + + if dp is None: + continue + + if p.ndim != 1: + dp = dp.add(p, alpha=g['weight_decay']) + + if p.ndim != 1: + param_norm = torch.norm(p) + update_norm = torch.norm(dp) + one = torch.ones_like(param_norm) + q = torch.where(param_norm > 0., + torch.where(update_norm > 0, + (g['eta'] * param_norm / update_norm), one), one) + dp = dp.mul(q) + + param_state = self.state[p] + if 'mu' not in param_state: + param_state['mu'] = torch.zeros_like(p) + mu = param_state['mu'] + mu.mul_(g['momentum']).add_(dp) + + p.add_(mu, alpha=-g['lr']) + + +class MultiCropWrapper(nn.Module): + """ + Perform forward pass separately on each resolution input. + The inputs corresponding to a single resolution are clubbed and single + forward is run on the same resolution inputs. Hence we do several + forward passes = number of different resolutions used. We then + concatenate all the output features and run the head forward on these + concatenated features. 
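    Grouping sketch for 2 global (224 px) + 8 local (96 px) crops:

        sizes = torch.tensor([224, 224, 96, 96, 96, 96, 96, 96, 96, 96])
        idx_crops = torch.cumsum(torch.unique_consecutive(sizes, return_counts=True)[1], 0)
        # idx_crops = tensor([2, 10]): one backbone pass over x[0:2], one over x[2:10]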
+ """ + def __init__(self, backbone, head): + super(MultiCropWrapper, self).__init__() + # disable layers dedicated to ImageNet labels classification + backbone.fc, backbone.head = nn.Identity(), nn.Identity() + self.backbone = backbone + self.head = head + + def forward(self, x): + # convert to list + if not isinstance(x, list): + x = [x] + idx_crops = torch.cumsum(torch.unique_consecutive( + torch.tensor([inp.shape[-1] for inp in x]), + return_counts=True, + )[1], 0) + start_idx, output = 0, torch.empty(0).to(x[0].device) + for end_idx in idx_crops: + _out = self.backbone(torch.cat(x[start_idx: end_idx])) + # The output is a tuple with XCiT model. See: + # https://github.com/facebookresearch/xcit/blob/master/xcit.py#L404-L405 + if isinstance(_out, tuple): + _out = _out[0] + # accumulate outputs + output = torch.cat((output, _out)) + start_idx = end_idx + # Run the head forward on the concatenated features. + return self.head(output) + + +def get_params_groups(model): + regularized = [] + not_regularized = [] + for name, param in model.named_parameters(): + if not param.requires_grad: + continue + # we do not regularize biases nor Norm parameters + if name.endswith(".bias") or len(param.shape) == 1: + not_regularized.append(param) + else: + regularized.append(param) + return [{'params': regularized}, {'params': not_regularized, 'weight_decay': 0.}] + + +def has_batchnorms(model): + bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.SyncBatchNorm) + for name, module in model.named_modules(): + if isinstance(module, bn_types): + return True + return False + + +class PCA(): + """ + Class to compute and apply PCA. + """ + def __init__(self, dim=256, whit=0.5): + self.dim = dim + self.whit = whit + self.mean = None + + def train_pca(self, cov): + """ + Takes a covariance matrix (np.ndarray) as input. + """ + d, v = np.linalg.eigh(cov) + eps = d.max() * 1e-5 + n_0 = (d < eps).sum() + if n_0 > 0: + d[d < eps] = eps + + # total energy + totenergy = d.sum() + + # sort eigenvectors with eigenvalues order + idx = np.argsort(d)[::-1][:self.dim] + d = d[idx] + v = v[:, idx] + + print("keeping %.2f %% of the energy" % (d.sum() / totenergy * 100.0)) + + # for the whitening + d = np.diag(1. / d**self.whit) + + # principal components + self.dvt = np.dot(d, v.T) + + def apply(self, x): + # input is from numpy + if isinstance(x, np.ndarray): + if self.mean is not None: + x -= self.mean + return np.dot(self.dvt, x.T).T + + # input is from torch and is on GPU + if x.is_cuda: + if self.mean is not None: + x -= torch.cuda.FloatTensor(self.mean) + return torch.mm(torch.cuda.FloatTensor(self.dvt), x.transpose(0, 1)).transpose(0, 1) + + # input if from torch, on CPU + if self.mean is not None: + x -= torch.FloatTensor(self.mean) + return torch.mm(torch.FloatTensor(self.dvt), x.transpose(0, 1)).transpose(0, 1) + + +def compute_ap(ranks, nres): + """ + Computes average precision for given ranked indexes. + Arguments + --------- + ranks : zerro-based ranks of positive images + nres : number of positive images + Returns + ------- + ap : average precision + """ + + # number of images ranked by the system + nimgranks = len(ranks) + + # accumulate trapezoids in PR-plot + ap = 0 + + recall_step = 1. / nres + + for j in np.arange(nimgranks): + rank = ranks[j] + + if rank == 0: + precision_0 = 1. + else: + precision_0 = float(j) / rank + + precision_1 = float(j + 1) / (rank + 1) + + ap += (precision_0 + precision_1) * recall_step / 2. 
+ + return ap + + +def compute_map(ranks, gnd, kappas=[]): + """ + Computes the mAP for a given set of returned results. + Usage: + map = compute_map (ranks, gnd) + computes mean average precsion (map) only + map, aps, pr, prs = compute_map (ranks, gnd, kappas) + computes mean average precision (map), average precision (aps) for each query + computes mean precision at kappas (pr), precision at kappas (prs) for each query + Notes: + 1) ranks starts from 0, ranks.shape = db_size X #queries + 2) The junk results (e.g., the query itself) should be declared in the gnd stuct array + 3) If there are no positive images for some query, that query is excluded from the evaluation + """ + + map = 0. + nq = len(gnd) # number of queries + aps = np.zeros(nq) + pr = np.zeros(len(kappas)) + prs = np.zeros((nq, len(kappas))) + nempty = 0 + + for i in np.arange(nq): + qgnd = np.array(gnd[i]['ok']) + + # no positive images, skip from the average + if qgnd.shape[0] == 0: + aps[i] = float('nan') + prs[i, :] = float('nan') + nempty += 1 + continue + + try: + qgndj = np.array(gnd[i]['junk']) + except: + qgndj = np.empty(0) + + # sorted positions of positive and junk images (0 based) + pos = np.arange(ranks.shape[0])[np.in1d(ranks[:,i], qgnd)] + junk = np.arange(ranks.shape[0])[np.in1d(ranks[:,i], qgndj)] + + k = 0; + ij = 0; + if len(junk): + # decrease positions of positives based on the number of + # junk images appearing before them + ip = 0 + while (ip < len(pos)): + while (ij < len(junk) and pos[ip] > junk[ij]): + k += 1 + ij += 1 + pos[ip] = pos[ip] - k + ip += 1 + + # compute ap + ap = compute_ap(pos, len(qgnd)) + map = map + ap + aps[i] = ap + + # compute precision @ k + pos += 1 # get it to 1-based + for j in np.arange(len(kappas)): + kq = min(max(pos), kappas[j]); + prs[i, j] = (pos <= kq).sum() / kq + pr = pr + prs[i, :] + + map = map / (nq - nempty) + pr = pr / (nq - nempty) + + return map, aps, pr, prs + + +def multi_scale(samples, model): + v = None + for s in [1, 1/2**(1/2), 1/2]: # we use 3 different scales + if s == 1: + inp = samples.clone() + else: + inp = nn.functional.interpolate(samples, scale_factor=s, mode='bilinear', align_corners=False) + feats = model(inp).clone() + if v is None: + v = feats + else: + v += feats + v /= 3 + v /= v.norm() + return v diff --git a/PuzzleTuning/Counterpart PreTrain Methods/dino-main/video_generation.py b/PuzzleTuning/Counterpart PreTrain Methods/dino-main/video_generation.py new file mode 100644 index 0000000000000000000000000000000000000000..94da9836ad0e9bd8dccf0f989b93a93ed11cfd7e --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/dino-main/video_generation.py @@ -0,0 +1,378 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
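# Overview: VideoGenerator below either (a) splits an input video into frames,
# runs per-frame self-attention inference, and re-encodes the attention heatmaps
# as a video; (b) does the same from a folder of pre-extracted frames; or
# (c) with --video_only, only re-encodes already-generated attention images.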
+import os +import glob +import sys +import argparse +import cv2 + +from tqdm import tqdm +import matplotlib.pyplot as plt +import torch +import torch.nn as nn +import torchvision +from torchvision import transforms as pth_transforms +import numpy as np +from PIL import Image + +import utils +import vision_transformer as vits + + +FOURCC = { + "mp4": cv2.VideoWriter_fourcc(*"MP4V"), + "avi": cv2.VideoWriter_fourcc(*"XVID"), +} +DEVICE = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") + + +class VideoGenerator: + def __init__(self, args): + self.args = args + # self.model = None + # Don't need to load model if you only want a video + if not self.args.video_only: + self.model = self.__load_model() + + def run(self): + if self.args.input_path is None: + print(f"Provided input path {self.args.input_path} is non valid.") + sys.exit(1) + else: + if self.args.video_only: + self._generate_video_from_images( + self.args.input_path, self.args.output_path + ) + else: + # If input path exists + if os.path.exists(self.args.input_path): + # If input is a video file + if os.path.isfile(self.args.input_path): + frames_folder = os.path.join(self.args.output_path, "frames") + attention_folder = os.path.join( + self.args.output_path, "attention" + ) + + os.makedirs(frames_folder, exist_ok=True) + os.makedirs(attention_folder, exist_ok=True) + + self._extract_frames_from_video( + self.args.input_path, frames_folder + ) + + self._inference( + frames_folder, + attention_folder, + ) + + self._generate_video_from_images( + attention_folder, self.args.output_path + ) + + # If input is a folder of already extracted frames + if os.path.isdir(self.args.input_path): + attention_folder = os.path.join( + self.args.output_path, "attention" + ) + + os.makedirs(attention_folder, exist_ok=True) + + self._inference(self.args.input_path, attention_folder) + + self._generate_video_from_images( + attention_folder, self.args.output_path + ) + + # If input path doesn't exists + else: + print(f"Provided input path {self.args.input_path} doesn't exists.") + sys.exit(1) + + def _extract_frames_from_video(self, inp: str, out: str): + vidcap = cv2.VideoCapture(inp) + self.args.fps = vidcap.get(cv2.CAP_PROP_FPS) + + print(f"Video: {inp} ({self.args.fps} fps)") + print(f"Extracting frames to {out}") + + success, image = vidcap.read() + count = 0 + while success: + cv2.imwrite( + os.path.join(out, f"frame-{count:04}.jpg"), + image, + ) + success, image = vidcap.read() + count += 1 + + def _generate_video_from_images(self, inp: str, out: str): + img_array = [] + attention_images_list = sorted(glob.glob(os.path.join(inp, "attn-*.jpg"))) + + # Get size of the first image + with open(attention_images_list[0], "rb") as f: + img = Image.open(f) + img = img.convert("RGB") + size = (img.width, img.height) + img_array.append(cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)) + + print(f"Generating video {size} to {out}") + + for filename in tqdm(attention_images_list[1:]): + with open(filename, "rb") as f: + img = Image.open(f) + img = img.convert("RGB") + img_array.append(cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)) + + out = cv2.VideoWriter( + os.path.join(out, "video." 
+ self.args.video_format), + FOURCC[self.args.video_format], + self.args.fps, + size, + ) + + for i in range(len(img_array)): + out.write(img_array[i]) + out.release() + print("Done") + + def _inference(self, inp: str, out: str): + print(f"Generating attention images to {out}") + + for img_path in tqdm(sorted(glob.glob(os.path.join(inp, "*.jpg")))): + with open(img_path, "rb") as f: + img = Image.open(f) + img = img.convert("RGB") + + if self.args.resize is not None: + transform = pth_transforms.Compose( + [ + pth_transforms.ToTensor(), + pth_transforms.Resize(self.args.resize), + pth_transforms.Normalize( + (0.485, 0.456, 0.406), (0.229, 0.224, 0.225) + ), + ] + ) + else: + transform = pth_transforms.Compose( + [ + pth_transforms.ToTensor(), + pth_transforms.Normalize( + (0.485, 0.456, 0.406), (0.229, 0.224, 0.225) + ), + ] + ) + + img = transform(img) + + # make the image divisible by the patch size + w, h = ( + img.shape[1] - img.shape[1] % self.args.patch_size, + img.shape[2] - img.shape[2] % self.args.patch_size, + ) + img = img[:, :w, :h].unsqueeze(0) + + w_featmap = img.shape[-2] // self.args.patch_size + h_featmap = img.shape[-1] // self.args.patch_size + + attentions = self.model.get_last_selfattention(img.to(DEVICE)) + + nh = attentions.shape[1] # number of head + + # we keep only the output patch attention + attentions = attentions[0, :, 0, 1:].reshape(nh, -1) + + # we keep only a certain percentage of the mass + val, idx = torch.sort(attentions) + val /= torch.sum(val, dim=1, keepdim=True) + cumval = torch.cumsum(val, dim=1) + th_attn = cumval > (1 - self.args.threshold) + idx2 = torch.argsort(idx) + for head in range(nh): + th_attn[head] = th_attn[head][idx2[head]] + th_attn = th_attn.reshape(nh, w_featmap, h_featmap).float() + # interpolate + th_attn = ( + nn.functional.interpolate( + th_attn.unsqueeze(0), + scale_factor=self.args.patch_size, + mode="nearest", + )[0] + .cpu() + .numpy() + ) + + attentions = attentions.reshape(nh, w_featmap, h_featmap) + attentions = ( + nn.functional.interpolate( + attentions.unsqueeze(0), + scale_factor=self.args.patch_size, + mode="nearest", + )[0] + .cpu() + .numpy() + ) + + # save attentions heatmaps + fname = os.path.join(out, "attn-" + os.path.basename(img_path)) + plt.imsave( + fname=fname, + arr=sum( + attentions[i] * 1 / attentions.shape[0] + for i in range(attentions.shape[0]) + ), + cmap="inferno", + format="jpg", + ) + + def __load_model(self): + # build model + model = vits.__dict__[self.args.arch]( + patch_size=self.args.patch_size, num_classes=0 + ) + for p in model.parameters(): + p.requires_grad = False + model.eval() + model.to(DEVICE) + + if os.path.isfile(self.args.pretrained_weights): + state_dict = torch.load(self.args.pretrained_weights, map_location="cpu") + if ( + self.args.checkpoint_key is not None + and self.args.checkpoint_key in state_dict + ): + print( + f"Take key {self.args.checkpoint_key} in provided checkpoint dict" + ) + state_dict = state_dict[self.args.checkpoint_key] + state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()} + # remove `backbone.` prefix induced by multicrop wrapper + state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()} + msg = model.load_state_dict(state_dict, strict=False) + print( + "Pretrained weights found at {} and loaded with msg: {}".format( + self.args.pretrained_weights, msg + ) + ) + else: + print( + "Please use the `--pretrained_weights` argument to indicate the path of the checkpoint to evaluate." 
+ ) + url = None + if self.args.arch == "vit_small" and self.args.patch_size == 16: + url = "dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth" + elif self.args.arch == "vit_small" and self.args.patch_size == 8: + url = "dino_deitsmall8_300ep_pretrain/dino_deitsmall8_300ep_pretrain.pth" # model used for visualizations in our paper + elif self.args.arch == "vit_base" and self.args.patch_size == 16: + url = "dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth" + elif self.args.arch == "vit_base" and self.args.patch_size == 8: + url = "dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth" + if url is not None: + print( + "Since no pretrained weights have been provided, we load the reference pretrained DINO weights." + ) + state_dict = torch.hub.load_state_dict_from_url( + url="https://dl.fbaipublicfiles.com/dino/" + url + ) + model.load_state_dict(state_dict, strict=True) + else: + print( + "There is no reference weights available for this model => We use random weights." + ) + return model + + +def parse_args(): + parser = argparse.ArgumentParser("Generation self-attention video") + parser.add_argument( + "--arch", + default="vit_small", + type=str, + choices=["vit_tiny", "vit_small", "vit_base"], + help="Architecture (support only ViT atm).", + ) + parser.add_argument( + "--patch_size", default=8, type=int, help="Patch resolution of the self.model." + ) + parser.add_argument( + "--pretrained_weights", + default="", + type=str, + help="Path to pretrained weights to load.", + ) + parser.add_argument( + "--checkpoint_key", + default="teacher", + type=str, + help='Key to use in the checkpoint (example: "teacher")', + ) + parser.add_argument( + "--input_path", + required=True, + type=str, + help="""Path to a video file if you want to extract frames + or to a folder of images already extracted by yourself. + or to a folder of attention images.""", + ) + parser.add_argument( + "--output_path", + default="./", + type=str, + help="""Path to store a folder of frames and / or a folder of attention images. + and / or a final video. Default to current directory.""", + ) + parser.add_argument( + "--threshold", + type=float, + default=0.6, + help="""We visualize masks + obtained by thresholding the self-attention maps to keep xx percent of the mass.""", + ) + parser.add_argument( + "--resize", + default=None, + type=int, + nargs="+", + help="""Apply a resize transformation to input image(s). Use if OOM error. + Usage (single or W H): --resize 512, --resize 720 1280""", + ) + parser.add_argument( + "--video_only", + action="store_true", + help="""Use this flag if you only want to generate a video and not all attention images. + If used, --input_path must be set to the folder of attention images. Ex: ./attention/""", + ) + parser.add_argument( + "--fps", + default=30.0, + type=float, + help="FPS of input / output video. 
Automatically set if you extract frames from a video.", + ) + parser.add_argument( + "--video_format", + default="mp4", + type=str, + choices=["mp4", "avi"], + help="Format of generated video (mp4 or avi).", + ) + + return parser.parse_args() + + +if __name__ == "__main__": + args = parse_args() + + vg = VideoGenerator(args) + vg.run() diff --git a/PuzzleTuning/Counterpart PreTrain Methods/dino-main/vision_transformer.py b/PuzzleTuning/Counterpart PreTrain Methods/dino-main/vision_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..f69a7ad0522500ca2a85305a789be5ca6ac474d0 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/dino-main/vision_transformer.py @@ -0,0 +1,291 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Mostly copy-paste from timm library. +https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py +""" +import math +from functools import partial + +import torch +import torch.nn as nn + +from utils import trunc_normal_ + + +def drop_path(x, drop_prob: float = 0., training: bool = False): + if drop_prob == 0. or not training: + return x + keep_prob = 1 - drop_prob + shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets + random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device) + random_tensor.floor_() # binarize + output = x.div(keep_prob) * random_tensor + return output + + +class DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
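+    With probability drop_prob the entire residual branch is zeroed for a
+    given sample; surviving samples are rescaled by 1 / keep_prob so the
+    expected output matches evaluation mode (see drop_path above).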
+ """ + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) + + +class Mlp(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x, attn + + +class Block(nn.Module): + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x, return_attention=False): + y, attn = self.attn(self.norm1(x)) + if return_attention: + return attn + x = x + self.drop_path(y) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class PatchEmbed(nn.Module): + """ Image to Patch Embedding + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): + super().__init__() + num_patches = (img_size // patch_size) * (img_size // patch_size) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + + def forward(self, x): + B, C, H, W = x.shape + x = self.proj(x).flatten(2).transpose(1, 2) + return x + + +class VisionTransformer(nn.Module): + """ Vision Transformer """ + def __init__(self, img_size=[224], patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., + drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs): + super().__init__() + self.num_features = self.embed_dim = embed_dim + + self.patch_embed = PatchEmbed( + img_size=img_size[0], patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + num_patches = self.patch_embed.num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer) + for i in range(depth)]) + self.norm = norm_layer(embed_dim) + + # Classifier head + self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + trunc_normal_(self.pos_embed, std=.02) + trunc_normal_(self.cls_token, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def interpolate_pos_encoding(self, x, w, h): + npatch = x.shape[1] - 1 + N = self.pos_embed.shape[1] - 1 + if npatch == N and w == h: + return self.pos_embed + class_pos_embed = self.pos_embed[:, 0] + patch_pos_embed = self.pos_embed[:, 1:] + dim = x.shape[-1] + w0 = w // self.patch_embed.patch_size + h0 = h // self.patch_embed.patch_size + # we add a small number to avoid floating point error in the interpolation + # see discussion at https://github.com/facebookresearch/dino/issues/8 + w0, h0 = w0 + 0.1, h0 + 0.1 + patch_pos_embed = nn.functional.interpolate( + patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2), + scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)), + mode='bicubic', + ) + assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1] + patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) + return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1) + + def 
prepare_tokens(self, x): + B, nc, w, h = x.shape + x = self.patch_embed(x) # patch linear embedding + + # add the [CLS] token to the embed patch tokens + cls_tokens = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + + # add positional encoding to each token + x = x + self.interpolate_pos_encoding(x, w, h) + + return self.pos_drop(x) + + def forward(self, x): + x = self.prepare_tokens(x) + for blk in self.blocks: + x = blk(x) + x = self.norm(x) + return x[:, 0] + + def get_last_selfattention(self, x): + x = self.prepare_tokens(x) + for i, blk in enumerate(self.blocks): + if i < len(self.blocks) - 1: + x = blk(x) + else: + # return attention of the last block + return blk(x, return_attention=True) + + def get_intermediate_layers(self, x, n=1): + x = self.prepare_tokens(x) + # we return the output tokens from the `n` last blocks + output = [] + for i, blk in enumerate(self.blocks): + x = blk(x) + if len(self.blocks) - i <= n: + output.append(self.norm(x)) + return output + + +def vit_tiny(patch_size=16, **kwargs): + model = VisionTransformer( + patch_size=patch_size, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4, + qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + return model + + +def vit_small(patch_size=16, **kwargs): + model = VisionTransformer( + patch_size=patch_size, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, + qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + return model + + +def vit_base(patch_size=16, **kwargs): + model = VisionTransformer( + patch_size=patch_size, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, + qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + return model + + +class DINOHead(nn.Module): + def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256): + super().__init__() + nlayers = max(nlayers, 1) + if nlayers == 1: + self.mlp = nn.Linear(in_dim, bottleneck_dim) + else: + layers = [nn.Linear(in_dim, hidden_dim)] + if use_bn: + layers.append(nn.BatchNorm1d(hidden_dim)) + layers.append(nn.GELU()) + for _ in range(nlayers - 2): + layers.append(nn.Linear(hidden_dim, hidden_dim)) + if use_bn: + layers.append(nn.BatchNorm1d(hidden_dim)) + layers.append(nn.GELU()) + layers.append(nn.Linear(hidden_dim, bottleneck_dim)) + self.mlp = nn.Sequential(*layers) + self.apply(self._init_weights) + self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False)) + self.last_layer.weight_g.data.fill_(1) + if norm_last_layer: + self.last_layer.weight_g.requires_grad = False + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + + def forward(self, x): + x = self.mlp(x) + x = nn.functional.normalize(x, dim=-1, p=2) + x = self.last_layer(x) + return x diff --git a/PuzzleTuning/Counterpart PreTrain Methods/dino-main/visualize_attention.py b/PuzzleTuning/Counterpart PreTrain Methods/dino-main/visualize_attention.py new file mode 100644 index 0000000000000000000000000000000000000000..4288265b9b8865bebfcaad1d350a114da35ff055 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/dino-main/visualize_attention.py @@ -0,0 +1,213 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +import sys +import argparse +import cv2 +import random +import colorsys +import requests +from io import BytesIO + +import skimage.io +from skimage.measure import find_contours +import matplotlib.pyplot as plt +from matplotlib.patches import Polygon +import torch +import torch.nn as nn +import torchvision +from torchvision import transforms as pth_transforms +import numpy as np +from PIL import Image + +import utils +import vision_transformer as vits + + +def apply_mask(image, mask, color, alpha=0.5): + for c in range(3): + image[:, :, c] = image[:, :, c] * (1 - alpha * mask) + alpha * mask * color[c] * 255 + return image + + +def random_colors(N, bright=True): + """ + Generate random colors. + """ + brightness = 1.0 if bright else 0.7 + hsv = [(i / N, 1, brightness) for i in range(N)] + colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv)) + random.shuffle(colors) + return colors + + +def display_instances(image, mask, fname="test", figsize=(5, 5), blur=False, contour=True, alpha=0.5): + fig = plt.figure(figsize=figsize, frameon=False) + ax = plt.Axes(fig, [0., 0., 1., 1.]) + ax.set_axis_off() + fig.add_axes(ax) + ax = plt.gca() + + N = 1 + mask = mask[None, :, :] + # Generate random colors + colors = random_colors(N) + + # Show area outside image boundaries. + height, width = image.shape[:2] + margin = 0 + ax.set_ylim(height + margin, -margin) + ax.set_xlim(-margin, width + margin) + ax.axis('off') + masked_image = image.astype(np.uint32).copy() + for i in range(N): + color = colors[i] + _mask = mask[i] + if blur: + _mask = cv2.blur(_mask,(10,10)) + # Mask + masked_image = apply_mask(masked_image, _mask, color, alpha) + # Mask Polygon + # Pad to ensure proper polygons for masks that touch image edges. 
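+        # (find_contours leaves contours that touch the array border open, so
+        # the one-pixel zero pad below guarantees closed polygons there.)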
+ if contour: + padded_mask = np.zeros((_mask.shape[0] + 2, _mask.shape[1] + 2)) + padded_mask[1:-1, 1:-1] = _mask + contours = find_contours(padded_mask, 0.5) + for verts in contours: + # Subtract the padding and flip (y, x) to (x, y) + verts = np.fliplr(verts) - 1 + p = Polygon(verts, facecolor="none", edgecolor=color) + ax.add_patch(p) + ax.imshow(masked_image.astype(np.uint8), aspect='auto') + fig.savefig(fname) + print(f"{fname} saved.") + return + + +if __name__ == '__main__': + parser = argparse.ArgumentParser('Visualize Self-Attention maps') + parser.add_argument('--arch', default='vit_small', type=str, + choices=['vit_tiny', 'vit_small', 'vit_base'], help='Architecture (support only ViT atm).') + parser.add_argument('--patch_size', default=8, type=int, help='Patch resolution of the model.') + parser.add_argument('--pretrained_weights', default='', type=str, + help="Path to pretrained weights to load.") + parser.add_argument("--checkpoint_key", default="teacher", type=str, + help='Key to use in the checkpoint (example: "teacher")') + parser.add_argument("--image_path", default=None, type=str, help="Path of the image to load.") + parser.add_argument("--image_size", default=(480, 480), type=int, nargs="+", help="Resize image.") + parser.add_argument('--output_dir', default='.', help='Path where to save visualizations.') + parser.add_argument("--threshold", type=float, default=None, help="""We visualize masks + obtained by thresholding the self-attention maps to keep xx% of the mass.""") + args = parser.parse_args() + + device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") + # build model + model = vits.__dict__[args.arch](patch_size=args.patch_size, num_classes=0) + for p in model.parameters(): + p.requires_grad = False + model.eval() + model.to(device) + if os.path.isfile(args.pretrained_weights): + state_dict = torch.load(args.pretrained_weights, map_location="cpu") + if args.checkpoint_key is not None and args.checkpoint_key in state_dict: + print(f"Take key {args.checkpoint_key} in provided checkpoint dict") + state_dict = state_dict[args.checkpoint_key] + # remove `module.` prefix + state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()} + # remove `backbone.` prefix induced by multicrop wrapper + state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()} + msg = model.load_state_dict(state_dict, strict=False) + print('Pretrained weights found at {} and loaded with msg: {}'.format(args.pretrained_weights, msg)) + else: + print("Please use the `--pretrained_weights` argument to indicate the path of the checkpoint to evaluate.") + url = None + if args.arch == "vit_small" and args.patch_size == 16: + url = "dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth" + elif args.arch == "vit_small" and args.patch_size == 8: + url = "dino_deitsmall8_300ep_pretrain/dino_deitsmall8_300ep_pretrain.pth" # model used for visualizations in our paper + elif args.arch == "vit_base" and args.patch_size == 16: + url = "dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth" + elif args.arch == "vit_base" and args.patch_size == 8: + url = "dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth" + if url is not None: + print("Since no pretrained weights have been provided, we load the reference pretrained DINO weights.") + state_dict = torch.hub.load_state_dict_from_url(url="https://dl.fbaipublicfiles.com/dino/" + url) + model.load_state_dict(state_dict, strict=True) + else: + print("There is no reference weights available for this model => 
We use random weights.") + + # open image + if args.image_path is None: + # user has not specified any image - we use our own image + print("Please use the `--image_path` argument to indicate the path of the image you wish to visualize.") + print("Since no image path have been provided, we take the first image in our paper.") + response = requests.get("https://dl.fbaipublicfiles.com/dino/img.png") + img = Image.open(BytesIO(response.content)) + img = img.convert('RGB') + elif os.path.isfile(args.image_path): + with open(args.image_path, 'rb') as f: + img = Image.open(f) + img = img.convert('RGB') + else: + print(f"Provided image path {args.image_path} is non valid.") + sys.exit(1) + transform = pth_transforms.Compose([ + pth_transforms.Resize(args.image_size), + pth_transforms.ToTensor(), + pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), + ]) + img = transform(img) + + # make the image divisible by the patch size + w, h = img.shape[1] - img.shape[1] % args.patch_size, img.shape[2] - img.shape[2] % args.patch_size + img = img[:, :w, :h].unsqueeze(0) + + w_featmap = img.shape[-2] // args.patch_size + h_featmap = img.shape[-1] // args.patch_size + + attentions = model.get_last_selfattention(img.to(device)) + + nh = attentions.shape[1] # number of head + + # we keep only the output patch attention + attentions = attentions[0, :, 0, 1:].reshape(nh, -1) + + if args.threshold is not None: + # we keep only a certain percentage of the mass + val, idx = torch.sort(attentions) + val /= torch.sum(val, dim=1, keepdim=True) + cumval = torch.cumsum(val, dim=1) + th_attn = cumval > (1 - args.threshold) + idx2 = torch.argsort(idx) + for head in range(nh): + th_attn[head] = th_attn[head][idx2[head]] + th_attn = th_attn.reshape(nh, w_featmap, h_featmap).float() + # interpolate + th_attn = nn.functional.interpolate(th_attn.unsqueeze(0), scale_factor=args.patch_size, mode="nearest")[0].cpu().numpy() + + attentions = attentions.reshape(nh, w_featmap, h_featmap) + attentions = nn.functional.interpolate(attentions.unsqueeze(0), scale_factor=args.patch_size, mode="nearest")[0].cpu().numpy() + + # save attentions heatmaps + os.makedirs(args.output_dir, exist_ok=True) + torchvision.utils.save_image(torchvision.utils.make_grid(img, normalize=True, scale_each=True), os.path.join(args.output_dir, "img.png")) + for j in range(nh): + fname = os.path.join(args.output_dir, "attn-head" + str(j) + ".png") + plt.imsave(fname=fname, arr=attentions[j], format='png') + print(f"{fname} saved.") + + if args.threshold is not None: + image = skimage.io.imread(os.path.join(args.output_dir, "img.png")) + for j in range(nh): + display_instances(image, th_attn[j], fname=os.path.join(args.output_dir, "mask_th" + str(args.threshold) + "_head" + str(j) +".png"), blur=False) diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/FINETUNE.md b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/FINETUNE.md new file mode 100644 index 0000000000000000000000000000000000000000..387b10df020c82914903fde13af857e577cb6f27 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/FINETUNE.md @@ -0,0 +1,13 @@ +## Fine tune GCMAE + +``` +python main_finetune.py \ + --data_path path/to/data \ + --nb_classes 9 \ + --output_dir path/to/ouput/dir \ + --log_dir path/to/log/dir \ + --batch_size 128 \ + --model vit_base_patch16 \ + --epochs 50 \ + --finetune path/to/pth/path \ +``` diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/GCMAE.png b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/GCMAE.png new file mode 
100644 index 0000000000000000000000000000000000000000..1b49c8db9e27862d0414f87aab517766ecc5ace9 Binary files /dev/null and b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/GCMAE.png differ diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/LICENSE b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..0ff744738f193e55a120e2ee6bcc1b4fdace0dd2 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/LICENSE @@ -0,0 +1,399 @@ +Attribution-NonCommercial 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. 
More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-NonCommercial 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-NonCommercial 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + d. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + e. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + f. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + g. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + h. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + i. NonCommercial means not primarily intended for or directed towards + commercial advantage or monetary compensation. For purposes of + this Public License, the exchange of the Licensed Material for + other material subject to Copyright and Similar Rights by digital + file-sharing or similar means is NonCommercial provided there is + no payment of monetary compensation in connection with the + exchange. + + j. 
Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + k. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + l. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part, for NonCommercial purposes only; and + + b. produce, reproduce, and Share Adapted Material for + NonCommercial purposes only. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. 
Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties, including when + the Licensed Material is used other than for NonCommercial + purposes. + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + 4. If You Share Adapted Material You produce, the Adapter's + License You apply must not prevent recipients of the Adapted + Material from complying with this Public License. + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database for NonCommercial purposes + only; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material; and + + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. 
+ +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. 
To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + +======================================================================= + +Creative Commons is not a party to its public +licenses. Notwithstanding, Creative Commons may elect to apply one of +its public licenses to material it publishes and in those instances +will be considered the “Licensor.” The text of the Creative Commons +public licenses is dedicated to the public domain under the CC0 Public +Domain Dedication. Except for the limited purpose of indicating that +material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the +public licenses. + +Creative Commons may be contacted at creativecommons.org. 
\ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/LINPROBE.md b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/LINPROBE.md new file mode 100644 index 0000000000000000000000000000000000000000..1cfc347057628562be6637111ee127c28235d4ae --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/LINPROBE.md @@ -0,0 +1,15 @@ +## Linear probe GCMAE + +``` +python main_linprobe.py \ + --data_path_train path/to/train/data \ + --data_path_val path/to/val/data \ + --nb_classes 2 \ + --output_dir path/to/ouput/dir \ + --log_dir path/to/log/dir \ + --batch_size 512 \ + --model vit_base_patch16 \ + --epochs 90 \ + --finetune path/to/pth/path +``` + diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/PRETRAIN.md b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/PRETRAIN.md new file mode 100644 index 0000000000000000000000000000000000000000..ef3e5b89f499461d4208b89a5cfafcfe298c0b2c --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/PRETRAIN.md @@ -0,0 +1,21 @@ +## Pre-training GCMAE + +To pre-train ViT-Base (recommended default) +``` +python main_pretrain.py \ + --data_path path/to/data \ + --data_val_path path/to/data \ + --output_dir path/to/ouput/dir \ + --log_dir path/to/log/dir \ + --batch_size 128 \ + --model gcmae_vit_base_patch16 \ + --norm_pix_loss \ + --mask_ratio 0.5 \ + --epochs 80 \ + --warmup_epochs 40 \ + --blr 1e-3 --weight_decay 0.05 \ + --low_dim 768 \ + --nce_k 8192 \ + --nce_t 0.07 \ + --nce_m 0.5 \ +``` \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/README.md b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a708663eeb22ddb1c096d6e026d1fbfc63dac468 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/README.md @@ -0,0 +1,15 @@ +# GCMAE + +The original repo of GCMAE could be found [here](https://github.com/StarUniversus/gcmae) + +To install environments: +```bash +pip install -r requirements.txt +``` + + +To start pretraining: +```bash +# You need to alter the script according to your directories +bash pretrain.sh +``` diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/README_origin.md b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/README_origin.md new file mode 100644 index 0000000000000000000000000000000000000000..1e112b24eb838119a25ee8877ce2186a3d63bbd2 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/README_origin.md @@ -0,0 +1,39 @@ +# GCMAE + +
+![GCMAE](GCMAE.png)
+ +The official implementation of the paper [Global Contrast Masked Autoencoders Are Powerful Pathological Representation Learners](https://arxiv.org/abs/2205.09048) + +``` +@article{li2022gcmae, + author = {Quan, Hao and Li, Xingyu and Chen, Weixing and Bai, Qun and Zou, Mingchen and Yang, Ruijie and Zheng, Tingting and Qi, Ruiqun and Gao, Xinghua and Cui, Xiaoyu}, + title = {Global Contrast Masked Autoencoders Are Powerful Pathological Representation Learners}, + journal={arXiv:2205.09048}, + year = {2022}, +} +``` +## Abstract +Based on digital whole slide scanning technique, artificial intelligence algorithms represented by deep learning have achieved remarkable results in the field of computational pathology. Compared with other medical images such as Computed Tomography (CT) or Magnetic Resonance Imaging (MRI), pathological images are more difficult to annotate, thus there is an extreme lack of data sets that can be used for supervised learning. In this study, a self-supervised learning (SSL) model, Global Contrast Masked Autoencoders (GCMAE), is proposed, which has the ability to represent both global and local domain-specific features of whole slide image (WSI), as well as excellent cross-data transfer ability. The Camelyon16 and NCTCRC datasets are used to evaluate the performance of our model. When dealing with transfer learning tasks with different data sets, the experimental results show that GCMAE has better linear classification accuracy than MAE, which can reach **81.10%** and **89.22%** respectively. Our method outperforms the previous state of-the-art algorithm and even surpass supervised learning (improved by **3.86%** on NCTCRC data sets). + +## Installation +This repo is a modification on the [mae repo](https://github.com/facebookresearch/mae). Installation and preparation follow that repo. + +## Usage + +* [PRETRAIN](PRETRAIN.md) + +* [LINPROBE](LINPROBE.md) + +* [FINETUNE](FINETUNE.md) + +* [ Visual GCMAE feature representation](VISUAL.md) + +## Dataset + * [Camelyon16](https://pan.baidu.com/s/1N0fqJR9u8yq-y6ZY0mSoUw?pwd=noms) + * [NCT-CRC-HE-100K](https://zenodo.org/record/1214456) + * [BreakHis](https://web.inf.ufpr.br/vri/databases/breast-cancer-histopathological-database-breakhis/) + +## License +Distributed under the CC-BY-NC 4.0 License. See [LICENSE](LICENSE) for more information. diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/VISUAL.md b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/VISUAL.md new file mode 100644 index 0000000000000000000000000000000000000000..60ddf39b5439137bd954800bb17d1260f0159f58 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/VISUAL.md @@ -0,0 +1,10 @@ +## Visual GCMAE feature representation + +``` +python tsne.py \ + --batch_size 128 \ + --model vit_base_patch16 \ + --finetune path/to/pth \ + --save_path path/to/png \ + --data_path_val path/to/data \ +``` \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/engine_finetune.py b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/engine_finetune.py new file mode 100644 index 0000000000000000000000000000000000000000..a838164a11c1e49ad3dfeee932208724bfc99611 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/engine_finetune.py @@ -0,0 +1,183 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+# -------------------------------------------------------- +# References: +# DeiT: https://github.com/facebookresearch/deit +# BEiT: https://github.com/microsoft/unilm/tree/master/beit +# MAE: https://github.com/facebookresearch/mae +# -------------------------------------------------------- + +import math +import sys +from typing import Iterable, Optional + +import torch + +from timm.data import Mixup +from timm.utils import accuracy + +import util.misc as misc +import util.lr_sched as lr_sched +from sklearn.metrics import roc_auc_score +from sklearn.metrics import precision_score, recall_score, f1_score, confusion_matrix +import torch.nn.functional as F +import numpy +import numpy as np +from torchmetrics import Specificity, AUROC + + + +def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module, + data_loader: Iterable, optimizer: torch.optim.Optimizer, + device: torch.device, epoch: int, loss_scaler, max_norm: float = 0, + mixup_fn: Optional[Mixup] = None, log_writer=None, + args=None): + model.train(True) + metric_logger = misc.MetricLogger(delimiter=" ") + metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}')) + header = 'Epoch: [{}]'.format(epoch) + print_freq = 20 + + accum_iter = args.accum_iter + + optimizer.zero_grad() + + if log_writer is not None: + print('log_dir: {}'.format(log_writer.log_dir)) + + for data_iter_step, (samples, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, header)): + + # we use a per iteration (instead of per epoch) lr scheduler + if data_iter_step % accum_iter == 0: + lr_sched.adjust_learning_rate(optimizer, data_iter_step / len(data_loader) + epoch, args) + + samples = samples.to(device, non_blocking=True) + targets = targets.to(device, non_blocking=True) + + if mixup_fn is not None: + samples, targets = mixup_fn(samples, targets) + + with torch.cuda.amp.autocast(): + outputs = model(samples) + loss = criterion(outputs, targets) + + loss_value = loss.item() + + if not math.isfinite(loss_value): + print("Loss is {}, stopping training".format(loss_value)) + sys.exit(1) + + loss /= accum_iter + loss_scaler(loss, optimizer, clip_grad=max_norm, + parameters=model.parameters(), create_graph=False, + update_grad=(data_iter_step + 1) % accum_iter == 0) + if (data_iter_step + 1) % accum_iter == 0: + optimizer.zero_grad() + + torch.cuda.synchronize() + + metric_logger.update(loss=loss_value) + min_lr = 10. + max_lr = 0. + for group in optimizer.param_groups: + min_lr = min(min_lr, group["lr"]) + max_lr = max(max_lr, group["lr"]) + + metric_logger.update(lr=max_lr) + + loss_value_reduce = misc.all_reduce_mean(loss_value) + if log_writer is not None and (data_iter_step + 1) % accum_iter == 0: + """ We use epoch_1000x as the x-axis in tensorboard. + This calibrates different curves when batch size changes. 
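+            (epoch_1000x counts training progress in thousandths of an epoch,
+            giving runs with different batch sizes a comparable x-axis.)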
+ """ + epoch_1000x = int((data_iter_step / len(data_loader) + epoch) * 1000) + log_writer.add_scalar('loss', loss_value_reduce, epoch_1000x) + log_writer.add_scalar('lr', max_lr, epoch_1000x) + + # gather the stats from all processes + metric_logger.synchronize_between_processes() + print("Averaged stats:", metric_logger) + return {k: meter.global_avg for k, meter in metric_logger.meters.items()} + +def confusion_m(y_true, y_pred): + tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel() + return tn, fp, fn, tp + +def compute_metrics_binary(probs, preds, targets): + auc = roc_auc_score(targets, probs) * 100 + precision = precision_score(targets, preds) * 100 + recall = recall_score(targets, preds) * 100 + f1 = f1_score(targets, preds) * 100 + tn, fp, fn, tp = confusion_m(targets, preds) + specificity = (tn / float(tn+fp)) * 100 + return auc, precision, recall, f1, specificity + +def compute_metrics_multiclass(probs, preds, targets, nb_classes): + preds_tensor, probs_tensor, targets_tensor = torch.tensor(preds), torch.tensor(probs), torch.tensor(targets) + auroc = AUROC(average='macro', num_classes=nb_classes) + auc = auroc(probs_tensor, targets_tensor) * 100 + precision = precision_score(targets, preds, average='macro') * 100 + recall = recall_score(targets, preds, average='macro') * 100 + f1 = f1_score(targets, preds, average='macro') * 100 + speci = Specificity(average='macro', num_classes=nb_classes) + specificity = speci(preds_tensor, targets_tensor) * 100 + return auc, precision, recall, f1, specificity + +@torch.no_grad() +def evaluate(data_loader, model, device, nb_classes): + criterion = torch.nn.CrossEntropyLoss() + m = torch.nn.Softmax(dim=1) + metric_logger = misc.MetricLogger(delimiter=" ") + header = 'Test:' + + # switch to evaluation mode + model.eval() + probs = [] + targets = [] + preds = [] + + for batch in metric_logger.log_every(data_loader, 10, header): + images = batch[0] + target = batch[-1] + images = images.to(device, non_blocking=True) + target = target.to(device, non_blocking=True) + + # compute output + with torch.cuda.amp.autocast(): + output = model(images) + loss = criterion(output, target) + output = m(output) + score, pred = output.topk(1, 1, True, True) + if nb_classes == 2: + prob = output[:, 1] + elif nb_classes > 2: + prob = output + + probs.extend(prob.detach().cpu().numpy()) + targets.extend(target.detach().cpu().numpy()) + preds.extend(pred.tolist()) + if nb_classes < 5: + acc1, acc5 = accuracy(output, target, topk=(1, 1)) + else: + acc1, acc5 = accuracy(output, target, topk=(1, 5)) + + batch_size = images.shape[0] + metric_logger.update(loss=loss.item()) + metric_logger.meters['acc1'].update(acc1.item(), n=batch_size) + metric_logger.meters['acc5'].update(acc5.item(), n=batch_size) + # gather the stats from all processes + metric_logger.synchronize_between_processes() + print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}' + .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss)) + if nb_classes == 2: + print("binary class metrics!") + auc, precision, recall, f1, specificity = compute_metrics_binary(probs, preds, targets) + + elif nb_classes > 2: + print("multi_class metrics!") + auc, precision, recall, f1, specificity = compute_metrics_multiclass(probs, preds, targets, nb_classes) + + return {k: meter.global_avg for k, meter in metric_logger.meters.items()}, auc, precision, recall, f1, specificity \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain 
Methods/gcmae/engine_pretrain.py b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/engine_pretrain.py new file mode 100644 index 0000000000000000000000000000000000000000..5203e79127d63c12e63b1a3e7ec60602f80fc915 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/engine_pretrain.py @@ -0,0 +1,99 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# -------------------------------------------------------- +# References: +# DeiT: https://github.com/facebookresearch/deit +# BEiT: https://github.com/microsoft/unilm/tree/master/beit +# -------------------------------------------------------- +import math +import sys +from typing import Iterable + +import torch + +import util.misc as misc +import util.lr_sched as lr_sched +from test_npid import NN, kNN + +def train_one_epoch(model: torch.nn.Module, + data_loader: Iterable, + optimizer: torch.optim.Optimizer, + device: torch.device, epoch: int, loss_scaler, + log_writer=None, + args=None, + lemniscate=None, + ): + model.train(True) + metric_logger = misc.MetricLogger(delimiter=" ") + metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}')) + header = 'Epoch: [{}]'.format(epoch) + print_freq = 20 + + accum_iter = args.accum_iter + + optimizer.zero_grad() + + if log_writer is not None: + print('log_dir: {}'.format(log_writer.log_dir)) + + for data_iter_step, (samples, _, index) in enumerate(metric_logger.log_every(data_loader, print_freq, header)): + + # we use a per iteration (instead of per epoch) lr scheduler + if data_iter_step % accum_iter == 0: + lr_sched.adjust_learning_rate(optimizer, data_iter_step / len(data_loader) + epoch, args) + + samples = samples.to(device, non_blocking=True) + index = index.to(device, non_blocking=True) + with torch.cuda.amp.autocast(): + loss_mae, _, _, loss_npid, _= model(samples, mask_ratio=args.mask_ratio, index = index, is_train=True) + loss = loss_mae + 0.1 * loss_npid + + loss_value = loss.item() + + if not math.isfinite(loss_value): + print("Loss is {}, stopping training".format(loss_value)) + sys.exit(1) + + loss /= accum_iter + loss_scaler(loss, optimizer, parameters=model.parameters(), + update_grad=(data_iter_step + 1) % accum_iter == 0) + if (data_iter_step + 1) % accum_iter == 0: + optimizer.zero_grad() + + torch.cuda.synchronize() + + + metric_logger.update(loss_all=loss_value) + metric_logger.update(loss_mae=loss_mae.item()) + metric_logger.update(loss_npid=loss_npid.item()) + + + lr = optimizer.param_groups[0]["lr"] + metric_logger.update(lr=lr) + + loss_value_reduce = misc.all_reduce_mean(loss_value) + if log_writer is not None and (data_iter_step + 1) % accum_iter == 0: + """ We use epoch_1000x as the x-axis in tensorboard. + This calibrates different curves when batch size changes. 
+            """
+
+            epoch_1000x = int((data_iter_step / len(data_loader) + epoch) * 1000)
+            log_writer.add_scalar('train_loss', loss_value_reduce, epoch_1000x)
+            log_writer.add_scalar('origin_loss/train_loss_mae', loss_mae.item(), epoch_1000x)
+            log_writer.add_scalar('origin_loss/train_loss_npid', loss_npid.item(), epoch_1000x)
+            log_writer.add_scalar('lr', lr, epoch_1000x)
+
+    # pred1 = NN(epoch, model, lemniscate, data_loader, data_loader_val)
+    # log_writer.add_scalar('NN_ac', pred1, epoch)
+    # if args.output_dir and (epoch % 20 == 0 or epoch + 1 == args.epochs):
+
+    # top1 = kNN(0, model, lemniscate, data_loader, data_loader_val, 200, args.nce_t)
+    # log_writer.add_scalar('KNN_top1', top1, epoch)
+
+    metric_logger.synchronize_between_processes()
+    print("Averaged stats:", metric_logger)
+    # return {k: meter.global_avg for k, meter in metric_logger.meters.items()}, pred1
+    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
\ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/NCEAverage.py b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/NCEAverage.py new file mode 100644 index 0000000000000000000000000000000000000000..20978a4003c2c145189e53a9f3463b9770b771de --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/NCEAverage.py @@ -0,0 +1,94 @@ +import torch
+from torch.autograd import Function
+from torch import nn
+from .alias_multinomial import AliasMethod
+import math
+
+class NCEFunction(Function):
+    @staticmethod
+    def forward(self, x, y, memory, idx, params):
+        K = int(params[0].item())
+        T = params[1].item()
+        Z = params[2].item()
+
+        momentum = params[3].item()
+        batchSize = x.size(0)
+        outputSize = memory.size(0)
+        inputSize = memory.size(1)
+
+        # sample positives & negatives
+        idx.select(1, 0).copy_(y.detach())
+
+        # sample corresponding weights
+        weight = torch.index_select(memory, 0, idx.view(-1))
+        weight.resize_(batchSize, K + 1, inputSize)
+
+        # inner product
+        with torch.cuda.amp.autocast(enabled=False):
+            out = torch.bmm(weight, x.detach().reshape(batchSize, inputSize, 1))
+        out.div_(T).exp_()  # batchSize * self.K+1
+        if Z < 0:
+            params[2] = out.mean() * outputSize
+            Z = params[2].item()
+            print("normalization constant Z is set to {:.1f}".format(Z))
+
+        out.div_(Z).resize_(batchSize, K + 1)
+
+        self.save_for_backward(x, memory, y, weight, out, params)
+
+        return out
+
+    @staticmethod
+    def backward(self, gradOutput):
+        x, memory, y, weight, out, params = self.saved_tensors
+        K = int(params[0].item())
+        T = params[1].item()
+        Z = params[2].item()
+        momentum = params[3].item()
+        batchSize = gradOutput.size(0)
+
+        # gradients d Pm / d linear = exp(linear) / Z
+        gradOutput.detach().mul_(out.detach())
+        # add temperature
+        gradOutput.detach().div_(T)
+
+        # gradient of linear
+        with torch.cuda.amp.autocast(enabled=False):
+            gradInput = torch.bmm(gradOutput.detach().reshape(batchSize, 1, K + 1), weight)
+        gradInput.resize_as_(x)
+
+        # update the non-parametric memory bank
+        weight_pos = weight.select(1, 0).resize_as_(x)
+        weight_pos.mul_(momentum)
+        weight_pos.add_(torch.mul(x.detach(), 1 - momentum))
+        w_norm = weight_pos.pow(2).sum(1, keepdim=True).pow(0.5)
+        updated_weight = weight_pos.div(w_norm)
+        memory.index_copy_(0, y, updated_weight)
+
+        return gradInput, None, None, None, None
+
+class NCEAverage(nn.Module):
+
+    def __init__(self, inputSize, outputSize, K, T=0.07, momentum=0.5, Z=None):
+        super(NCEAverage, self).__init__()
+
self.nLem = outputSize + self.unigrams = torch.ones(self.nLem) + self.multinomial = AliasMethod(self.unigrams) + self.multinomial.cuda() + self.K = K + + self.register_buffer('params',torch.tensor([K, T, -1, momentum])) + stdv = 1. / math.sqrt(inputSize/3) + self.register_buffer('memory', torch.rand(outputSize, inputSize).mul_(2*stdv).add_(-stdv)) + + def forward(self, x, y): + batchSize = x.size(0) + idx = self.multinomial.draw(batchSize * (self.K+1)).view(batchSize, -1) + out = NCEFunction.apply(x, y, self.memory, idx, self.params) + return out + diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/NCECriterion.py b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/NCECriterion.py new file mode 100644 index 0000000000000000000000000000000000000000..1fcd33441e7917f82059d20da77625b2052475da --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/NCECriterion.py @@ -0,0 +1,38 @@ +import torch +from torch import nn + +eps = 1e-7 + +class NCECriterion(nn.Module): + + def __init__(self, nLem): + super(NCECriterion, self).__init__() + self.nLem = nLem + + def forward(self, x, targets): + batchSize = x.size(0) + K = x.size(1)-1 + Pnt = 1 / float(self.nLem) + Pns = 1 / float(self.nLem) + + # eq 5.1 : P(origin=model) = Pmt / (Pmt + k*Pnt) + Pmt = x.select(1,0) + Pmt_div = Pmt.add(K * Pnt + eps) + lnPmt = torch.div(Pmt, Pmt_div) + + # eq 5.2 : P(origin=noise) = k*Pns / (Pms + k*Pns) + Pon_div = x.narrow(1,1,K).add(K * Pns + eps) + Pon = Pon_div.clone().fill_(K * Pns) + lnPon = torch.div(Pon, Pon_div) + + # equation 6 in ref. A + lnPmt.log_() + lnPon.log_() + + lnPmtsum = lnPmt.sum(0) + lnPonsum = lnPon.view(-1, 1).sum(0) + + loss = - (lnPmtsum + lnPonsum) / batchSize + + return loss + diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/__init__.py b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..556df42ea177f37fd5e5497c14eae9e17f9b8406 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/__init__.py @@ -0,0 +1 @@ +# nothing diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/__pycache__/NCEAverage.cpython-38.pyc b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/__pycache__/NCEAverage.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8617f136507d4e4bcaa2f50895348f1f4efe5ad6 Binary files /dev/null and b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/__pycache__/NCEAverage.cpython-38.pyc differ diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/__pycache__/NCECriterion.cpython-38.pyc b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/__pycache__/NCECriterion.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ee9ccef395b7aea97b1db1d41ecec0dde54a8c4 Binary files /dev/null and b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/__pycache__/NCECriterion.cpython-38.pyc differ diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/__pycache__/__init__.cpython-38.pyc b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9716f8fa40858a99a675881da23dc194af9150db Binary files /dev/null and b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/__pycache__/__init__.cpython-38.pyc differ diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/__pycache__/alias_multinomial.cpython-38.pyc b/PuzzleTuning/Counterpart PreTrain 
Methods/gcmae/lib/__pycache__/alias_multinomial.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a58dd8b7d9cac7890a0a6846130a0ebde54d987a Binary files /dev/null and b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/__pycache__/alias_multinomial.cpython-38.pyc differ diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/__pycache__/normalize.cpython-38.pyc b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/__pycache__/normalize.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..919d6dc319868c2888d54469c643585aecf18874 Binary files /dev/null and b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/__pycache__/normalize.cpython-38.pyc differ diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/__pycache__/utils.cpython-38.pyc b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/__pycache__/utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..34f9b8719b29a41d9e91d62fd0a3a4e17c63a5cc Binary files /dev/null and b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/__pycache__/utils.cpython-38.pyc differ diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/alias_multinomial.py b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/alias_multinomial.py new file mode 100644 index 0000000000000000000000000000000000000000..a3a0eb78684c849c4827dc294b3684dd75821f00 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/alias_multinomial.py @@ -0,0 +1,64 @@ +import torch
+
+class AliasMethod(object):
+    '''
+    From: https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
+    '''
+    def __init__(self, probs):
+
+        if probs.sum() > 1:
+            probs.div_(probs.sum())
+        K = len(probs)
+        self.prob = torch.zeros(K)
+        self.alias = torch.LongTensor([0]*K)
+
+        # Sort the data into the outcomes with probabilities
+        # that are larger and smaller than 1/K.
+        smaller = []
+        larger = []
+        for kk, prob in enumerate(probs):
+            self.prob[kk] = K*prob
+            if self.prob[kk] < 1.0:
+                smaller.append(kk)
+            else:
+                larger.append(kk)
+
+        # Loop through and create little binary mixtures that
+        # appropriately allocate the larger outcomes over the
+        # overall uniform mixture.
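+        # Each pass pairs one under-full slot with one over-full outcome: the
+        # small slot is topped up by aliasing to the large outcome, and the
+        # large outcome's leftover mass goes back on the worklists, until every
+        # slot holds exactly 1/K of the total probability.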
+ while len(smaller) > 0 and len(larger) > 0: + small = smaller.pop() + large = larger.pop() + + self.alias[small] = large + self.prob[large] = (self.prob[large] - 1.0) + self.prob[small] + + if self.prob[large] < 1.0: + smaller.append(large) + else: + larger.append(large) + + for last_one in smaller+larger: + self.prob[last_one] = 1 + + def cuda(self): + self.prob = self.prob.cuda() + self.alias = self.alias.cuda() + + def draw(self, N): + ''' + Draw N samples from multinomial + ''' + K = self.alias.size(0) + + kk = torch.zeros(N, dtype=torch.long, device=self.prob.device).random_(0, K) + prob = self.prob.index_select(0, kk) + alias = self.alias.index_select(0, kk) + # b is whether a random number is greater than q + b = torch.bernoulli(prob) + oq = kk.mul(b.long()) + oj = alias.mul((1-b).long()) + + return oq + oj + diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/normalize.py b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/normalize.py new file mode 100644 index 0000000000000000000000000000000000000000..208170b762b87b8b164a02c3f308c9432eee02fd --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/normalize.py @@ -0,0 +1,14 @@ +import torch +from torch.autograd import Variable +from torch import nn + +class Normalize(nn.Module): + + def __init__(self, power=2): + super(Normalize, self).__init__() + self.power = power + + def forward(self, x): + norm = x.pow(self.power).sum(1, keepdim=True).pow(1./self.power) + out = x.div(norm) + return out diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/utils.py b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ccc2c7fa155547c8976c3b598dca091a76f46600 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/lib/utils.py @@ -0,0 +1,16 @@ +class AverageMeter(object): + """Computes and stores the average and current value""" + def __init__(self): + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/load_vit_from_ckpt.py b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/load_vit_from_ckpt.py new file mode 100644 index 0000000000000000000000000000000000000000..cf9b84e39de79b0f9882317d4de4044e4280ac27 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/load_vit_from_ckpt.py @@ -0,0 +1,154 @@ +""" +Extracting backbone from a specified gcmae checkpoint. 
+
+Example:
+
+python load_vit_from_ckpt.py \
+    --checkpoint /home/workenv/label-efficient-dl/gcmae/gcmae/output/checkpoint-19.pth \
+    --save-to ./output/final_models/ \
+    --save-name vit_gcmae_16_224.pth \
+    --num-classes 2
+"""
+
+import torch
+import os
+import argparse
+from timm import create_model
+# from net.models.vit import VisionTransformer
+
+
+def gen_basic_weight(save_dir):
+    # Load timm vit weight
+    model = create_model('vit_base_patch16_224', pretrained=False, in_chans=3)
+    random_state_dict = model.state_dict()
+
+    model = create_model('vit_base_patch16_224', pretrained=True, in_chans=3)
+    pretrained_state_dict = model.state_dict()
+
+    # Save model
+    print(f'Saving backbone init weight to {save_dir}...')
+    if not os.path.exists(save_dir):
+        os.makedirs(save_dir)
+    torch.save(random_state_dict, os.path.join(save_dir, 'ViT_b16_224_Random_Init.pth'))
+    torch.save(pretrained_state_dict, os.path.join(save_dir, 'ViT_b16_224_Imagenet.pth'))
+
+
+# --------------------------------------------------------
+# Interpolate position embeddings for high-resolution
+# References:
+# DeiT: https://github.com/facebookresearch/deit
+# --------------------------------------------------------
+def interpolate_pos_embed(model, checkpoint_model):
+    if 'pos_embed' in checkpoint_model:
+        pos_embed_checkpoint = checkpoint_model['pos_embed']
+        embedding_size = pos_embed_checkpoint.shape[-1]
+        num_patches = model.patch_embed.num_patches
+        num_extra_tokens = model.pos_embed.shape[-2] - num_patches
+        # height (== width) for the checkpoint position embedding
+        orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
+        # height (== width) for the new position embedding
+        new_size = int(num_patches ** 0.5)
+        # class_token and dist_token are kept unchanged
+        if orig_size != new_size:
+            print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
+            extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
+            # only the position tokens are interpolated
+            pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
+            pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
+            pos_tokens = torch.nn.functional.interpolate(
+                pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
+            pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
+            new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
+            checkpoint_model['pos_embed'] = new_pos_embed
+
+
+def main(args):
+    """Read ViT parameters from a GCMAE checkpoint
+    """
+
+    # Initialize model
+    if args.basic_weight:
+        model = create_model('vit_base_patch16_224', pretrained=False, in_chans=3)
+        # model = VisionTransformer(num_classes=args.num_classes)
+
+        # Load basic weights (default initial parameters)
+        basic_weight = torch.load(args.basic_weight)
+        model.load_state_dict(basic_weight, strict=False)
+    else:
+        # no basic weight given: fall back to ImageNet-pretrained timm weights
+        model = create_model('vit_base_patch16_224', pretrained=True, in_chans=3)
+
+    # Load checkpoint
+    checkpoint = torch.load(args.checkpoint)
+    ckp_state_dict = checkpoint['model']
+    model_state_dict = model.state_dict()
+
+    # interpolate position embedding
+    interpolate_pos_embed(model, ckp_state_dict)
+
+    print('checking checkpoint weights...')
+    len_state_dict = len(ckp_state_dict)
+    for seq, src_k in enumerate(ckp_state_dict.keys()):
+        tgt_k = str(src_k)
+        if tgt_k not in model_state_dict.keys():
+            print(f'{seq+1}/{len_state_dict}
Skipped: {src_k}, {ckp_state_dict[src_k].shape}') + + print('loading weights...') + len_state_dict = len(model_state_dict) + for seq, tgt_k in enumerate(model_state_dict.keys()): + if tgt_k in ckp_state_dict: + # print(f'{seq+1}/{len_state_dict} Loaded: {ckp_state_dict[tgt_k].shape}, {model_state_dict[tgt_k].shape}') + model_state_dict[tgt_k] = ckp_state_dict[tgt_k] + else: + print(f'{seq+1}/{len_state_dict} Skipped: {tgt_k}') + + model.load_state_dict(model_state_dict, strict=False) + + # Save model + print(f'Saving model to {args.save_to}...') + if not os.path.exists(args.save_to): + os.makedirs(args.save_to) + torch.save(model.state_dict(), os.path.join(args.save_to, args.save_name)) + + +def get_args_parser(): + """Input parameters + """ + parser = argparse.ArgumentParser(description='Extract backbone state dict') + parser.add_argument('--checkpoint', default='./checkpoint_0004.pth.tar', type=str, required=True, + help='Path to the checkpoint') + parser.add_argument('--save-to', default='./output', type=str, required=True, + help='Where to save the model') + parser.add_argument('--save-name', default='vit_gcmae_16_224.pth', type=str, required=True, + help='Model save name') + parser.add_argument('--num-classes', default=2, type=int, + help='Number of classes to be classified') + parser.add_argument('--random-seed', default=42, type=int, + help='Random seed (enable reproduction)') + parser.add_argument('--basic-weight', default='', type=str, + help='Basic weight (used to init parameters)') + return parser + + +def setup_seed(seed): + """Fix up the random seed + + Args: + seed (int): Seed to be applied + """ + import random + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + random.seed(seed) + torch.backends.cudnn.deterministic = True + + +if __name__ == '__main__': + parser = get_args_parser() + args = parser.parse_args() + + setup_seed(args.random_seed) + main(args) \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/main_finetune.py b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/main_finetune.py new file mode 100644 index 0000000000000000000000000000000000000000..1de6c453465689761b7827353cc5186b89a4c30f --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/main_finetune.py @@ -0,0 +1,363 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
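+# Example invocation (an illustrative sketch; paths and the class count are
+# placeholders, not values from the original repo):
+#
+#   python main_finetune.py \
+#       --model vit_base_patch16 \
+#       --finetune path/to/gcmae_checkpoint.pth \
+#       --data_path path/to/dataset \
+#       --nb_classes 2 \
+#       --output_dir ./output --log_dir ./output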
+# -------------------------------------------------------- +# References: +# DeiT: https://github.com/facebookresearch/deit +# BEiT: https://github.com/microsoft/unilm/tree/master/beit +# MAE: https://github.com/facebookresearch/mae +# -------------------------------------------------------- + +import argparse +import datetime +import json +import numpy as np +import os +import time +from pathlib import Path + +import torch +import torch.backends.cudnn as cudnn +from torch.utils.tensorboard import SummaryWriter + +import timm + +assert timm.__version__ == "0.3.2" # version check +from timm.models.layers import trunc_normal_ +from timm.data.mixup import Mixup +from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy + +import util.lr_decay as lrd +import util.misc as misc +from util.datasets import build_dataset +from util.pos_embed import interpolate_pos_embed +from util.misc import NativeScalerWithGradNormCount as NativeScaler + +import models_vit + +from engine_finetune import train_one_epoch, evaluate + + +def get_args_parser(): + parser = argparse.ArgumentParser('GCMAE fine-tuning for image classification', add_help=False) + parser.add_argument('--batch_size', default=128, type=int, + help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus') + parser.add_argument('--epochs', default=50, type=int) + parser.add_argument('--accum_iter', default=1, type=int, + help='Accumulate gradient iterations (for increasing the effective batch size under memory constraints)') + + # Model parameters + parser.add_argument('--model', default='vit_base_patch16', type=str, metavar='MODEL', + help='Name of model to train') + + parser.add_argument('--input_size', default=224, type=int, + help='images input size') + + parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT', + help='Drop path rate (default: 0.1)') + + # Optimizer parameters + parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM', + help='Clip gradient norm (default: None, no clipping)') + parser.add_argument('--weight_decay', type=float, default=0.05, + help='weight decay (default: 0.05)') + + parser.add_argument('--lr', type=float, default=None, metavar='LR', + help='learning rate (absolute lr)') + parser.add_argument('--blr', type=float, default=1e-3, metavar='LR', + help='base learning rate: absolute_lr = base_lr * total_batch_size / 256')#default=1e-3 + parser.add_argument('--layer_decay', type=float, default=0.75, + help='layer-wise lr decay from ELECTRA/BEiT') + + parser.add_argument('--min_lr', type=float, default=1e-6, metavar='LR', + help='lower lr bound for cyclic schedulers that hit 0') + + parser.add_argument('--warmup_epochs', type=int, default=5, metavar='N', + help='epochs to warmup LR') + + # Augmentation parameters + parser.add_argument('--color_jitter', type=float, default=None, metavar='PCT', + help='Color jitter factor (enabled only when not using Auto/RandAug)') + parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME', + help='Use AutoAugment policy. "v0" or "original". 
" + "(default: rand-m9-mstd0.5-inc1)'), + parser.add_argument('--smoothing', type=float, default=0.1, + help='Label smoothing (default: 0.1)') + + # * Random Erase params + parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT', + help='Random erase prob (default: 0.25)') + parser.add_argument('--remode', type=str, default='pixel', + help='Random erase mode (default: "pixel")') + parser.add_argument('--recount', type=int, default=1, + help='Random erase count (default: 1)') + parser.add_argument('--resplit', action='store_true', default=False, + help='Do not random erase first (clean) augmentation split') + + # * Mixup params + parser.add_argument('--mixup', type=float, default=0, + help='mixup alpha, mixup enabled if > 0.') + parser.add_argument('--cutmix', type=float, default=0, + help='cutmix alpha, cutmix enabled if > 0.') + parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None, + help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)') + parser.add_argument('--mixup_prob', type=float, default=1.0, + help='Probability of performing mixup or cutmix when either/both is enabled') + parser.add_argument('--mixup_switch_prob', type=float, default=0.5, + help='Probability of switching to cutmix when both mixup and cutmix enabled') + parser.add_argument('--mixup_mode', type=str, default='batch', + help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"') + + # * Finetuning params + parser.add_argument('--finetune', default=' ', + help='finetune from checkpoint') + parser.add_argument('--global_pool', action='store_true') + parser.set_defaults(global_pool=False) + parser.add_argument('--cls_token', action='store_false', dest='global_pool', + help='Use class token instead of global pool for classification') + + # Dataset parameters + parser.add_argument('--data_path', default=' ', type=str, + help='dataset path') + parser.add_argument('--nb_classes', default=2, type=int, + help='number of the classification types') + + parser.add_argument('--output_dir', default=' ', + help='path where to save, empty for no saving') + parser.add_argument('--log_dir', default=' ', + help='path where to tensorboard log') + parser.add_argument('--device', default='cuda', + help='device to use for training / testing') + parser.add_argument('--seed', default=0, type=int) + parser.add_argument('--resume', default='', + help='resume from checkpoint') + + parser.add_argument('--start_epoch', default=0, type=int, metavar='N', + help='start epoch') + parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') + parser.set_defaults(eval=False) + parser.add_argument('--dist_eval', action='store_true', default=False, + help='Enabling distributed evaluation (recommended during training for faster monitor') + parser.add_argument('--num_workers', default=16, type=int) + parser.add_argument('--pin_mem', action='store_true', + help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') + parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem') + parser.set_defaults(pin_mem=True) + + # distributed training parameters + parser.add_argument('--world_size', default=1, type=int, + help='number of distributed processes') + parser.add_argument('--local_rank', default=-1, type=int) + parser.add_argument('--dist_on_itp', action='store_true') + parser.add_argument('--dist_url', default='env://', + help='url used to set up distributed training') + parser.add_argument('--gpu_id', default=0, 
type=int, + help="the order of gpu") + + return parser + + +def main(args): + torch.cuda.set_device(args.gpu_id) + misc.init_distributed_mode(args) + + print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__)))) + print("{}".format(args).replace(', ', ',\n')) + + device = torch.device(args.device) + + # fix the seed for reproducibility + seed = args.seed + misc.get_rank() + torch.manual_seed(seed) + np.random.seed(seed) + + cudnn.benchmark = True + + dataset_train = build_dataset(is_train=True, args=args) + dataset_val = build_dataset(is_train=False, args=args) + + if True: # args.distributed: + num_tasks = misc.get_world_size() + global_rank = misc.get_rank() + sampler_train = torch.utils.data.DistributedSampler( + dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True + ) + print("Sampler_train = %s" % str(sampler_train)) + if args.dist_eval: + if len(dataset_val) % num_tasks != 0: + print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. ' + 'This will slightly alter validation results as extra duplicate entries are added to achieve ' + 'equal num of samples per-process.') + sampler_val = torch.utils.data.DistributedSampler( + dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=True) # shuffle=True to reduce monitor bias + else: + sampler_val = torch.utils.data.SequentialSampler(dataset_val) + else: + sampler_train = torch.utils.data.RandomSampler(dataset_train) + sampler_val = torch.utils.data.SequentialSampler(dataset_val) + + if global_rank == 0 and args.log_dir is not None and not args.eval: + os.makedirs(args.log_dir, exist_ok=True) + log_writer = SummaryWriter(log_dir=args.log_dir) + else: + log_writer = None + + data_loader_train = torch.utils.data.DataLoader( + dataset_train, sampler=sampler_train, + batch_size=args.batch_size, + num_workers=args.num_workers, + pin_memory=args.pin_mem, + drop_last=True, + ) + + data_loader_val = torch.utils.data.DataLoader( + dataset_val, sampler=sampler_val, + batch_size=args.batch_size, + num_workers=args.num_workers, + pin_memory=args.pin_mem, + drop_last=False + ) + + mixup_fn = None + mixup_active = args.mixup > 0 or args.cutmix > 0. 
or args.cutmix_minmax is not None + if mixup_active: + print("Mixup is activated!") + mixup_fn = Mixup( + mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax, + prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode, + label_smoothing=args.smoothing, num_classes=args.nb_classes) + + model = models_vit.__dict__[args.model]( + num_classes=args.nb_classes, + drop_path_rate=args.drop_path, + global_pool=args.global_pool, + ) + + if args.finetune and not args.eval: + checkpoint = torch.load(args.finetune, map_location='cpu') + + print("Load pre-trained checkpoint from: %s" % args.finetune) + checkpoint_model = checkpoint['model'] + state_dict = model.state_dict() + for k in ['head.weight', 'head.bias']: + if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape: + print(f"Removing key {k} from pretrained checkpoint") + del checkpoint_model[k] + + # interpolate position embedding + interpolate_pos_embed(model, checkpoint_model) + + # load pre-trained model + msg = model.load_state_dict(checkpoint_model, strict=False) + print(msg) + + if args.global_pool: + assert set(msg.missing_keys) == {'head.weight', 'head.bias', 'fc_norm.weight', 'fc_norm.bias'} + else: + assert set(msg.missing_keys) == {'head.weight', 'head.bias'} + + # manually initialize fc layer + trunc_normal_(model.head.weight, std=2e-5) + + model.to(device) + + model_without_ddp = model + n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) + + print("Model = %s" % str(model_without_ddp)) + print('number of params (M): %.2f' % (n_parameters / 1.e6)) + + eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size() + + if args.lr is None: # only base_lr is specified + args.lr = args.blr * eff_batch_size / 256 + + print("base lr: %.2e" % (args.lr * 256 / eff_batch_size)) + print("actual lr: %.2e" % args.lr) + + print("accumulate grad iterations: %d" % args.accum_iter) + print("effective batch size: %d" % eff_batch_size) + + if args.distributed: + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) + model_without_ddp = model.module + + # build optimizer with layer-wise lr decay (lrd) + param_groups = lrd.param_groups_lrd(model_without_ddp, args.weight_decay, + no_weight_decay_list=model_without_ddp.no_weight_decay(), + layer_decay=args.layer_decay + ) + optimizer = torch.optim.AdamW(param_groups, lr=args.lr) + loss_scaler = NativeScaler() + + if mixup_fn is not None: + # smoothing is handled with mixup label transform + criterion = SoftTargetCrossEntropy() + elif args.smoothing > 0.: + criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing) + else: + criterion = torch.nn.CrossEntropyLoss() + + print("criterion = %s" % str(criterion)) + + misc.load_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler) + + if args.eval: + test_stats, auc, precision, recall, f1, specificity = evaluate(data_loader_val, model, device, args.nb_classes) + print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.2f}%, AUC:{auc:.2f}%, precision {precision:.2f}%, recall {recall:.2f}%, f1_score {f1:.2f}%, specificity {specificity:.2f}%") + exit(0) + + print(f"Start training for {args.epochs} epochs") + start_time = time.time() + max_accuracy = 0.0 + max_auc = 0.0 + for epoch in range(args.start_epoch, args.epochs): + if args.distributed: + data_loader_train.sampler.set_epoch(epoch) + train_stats = train_one_epoch( + model, criterion, 
data_loader_train, + optimizer, device, epoch, loss_scaler, + args.clip_grad, mixup_fn, + log_writer=log_writer, + args=args + ) + if args.output_dir and (epoch % 1 == 0 or epoch + 1 == args.epochs): + misc.save_model( + args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, + loss_scaler=loss_scaler, epoch=epoch) + + test_stats, auc, precision, recall, f1, specificity = evaluate(data_loader_val, model, device, args.nb_classes) + print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.2f}%, AUC: {auc:.2f}%, precision {precision:.2f}%, recall {recall:.2f}%, f1_score {f1:.2f}%, specificity {specificity:.2f}%") + max_accuracy = max(max_accuracy, test_stats["acc1"]) + max_auc = max(max_auc, auc) + print(f'Max accuracy: {max_accuracy:.2f}%, Max AUC: {max_auc:.2f}%') + + if log_writer is not None: + log_writer.add_scalar('perf/test_acc1', test_stats['acc1'], epoch) + log_writer.add_scalar('perf/test_acc5', test_stats['acc5'], epoch) + log_writer.add_scalar('perf/test_loss', test_stats['loss'], epoch) + + log_stats = {**{f'train_{k}': v for k, v in train_stats.items()}, + **{f'test_{k}': v for k, v in test_stats.items()}, + 'epoch': epoch, + 'n_parameters': n_parameters} + + if args.output_dir and misc.is_main_process(): + if log_writer is not None: + log_writer.flush() + with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f: + f.write(json.dumps(log_stats) + "\n") + + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('Training time {}'.format(total_time_str)) + + +if __name__ == '__main__': + args = get_args_parser() + args = args.parse_args() + if args.output_dir: + Path(args.output_dir).mkdir(parents=True, exist_ok=True) + main(args) diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/main_linprobe.py b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/main_linprobe.py new file mode 100644 index 0000000000000000000000000000000000000000..9c40958bc10db2151716b5b0efcead7210b7b759 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/main_linprobe.py @@ -0,0 +1,322 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
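+# Example invocation (an illustrative sketch; paths and the class count are
+# placeholders, not values from the original repo):
+#
+#   python main_linprobe.py \
+#       --model vit_base_patch16 \
+#       --finetune path/to/gcmae_checkpoint.pth \
+#       --data_path_train path/to/train \
+#       --data_path_val path/to/val \
+#       --nb_classes 2 \
+#       --output_dir ./output --log_dir ./output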
+# -------------------------------------------------------- +# References: +# DeiT: https://github.com/facebookresearch/deit +# MoCo v3: https://github.com/facebookresearch/moco-v3 +# MAE: https://github.com/facebookresearch/mae +# -------------------------------------------------------- + +import argparse +import datetime +import json +from random import shuffle +import numpy as np +import os +import time +from pathlib import Path + +import torch +import torch.backends.cudnn as cudnn +from torch.utils.tensorboard import SummaryWriter +import torchvision.transforms as transforms +import torchvision.datasets as datasets + +import timm + +assert timm.__version__ == "0.3.2" # version check +from timm.models.layers import trunc_normal_ + +import util.misc as misc +from util.pos_embed import interpolate_pos_embed +from util.misc import NativeScalerWithGradNormCount as NativeScaler +from util.lars import LARS +from util.crop import RandomResizedCrop + +import models_vit +from engine_finetune import train_one_epoch, evaluate + + +def get_args_parser(): + parser = argparse.ArgumentParser('GCMAE linear probing for image classification', add_help=False) + parser.add_argument('--batch_size', default=512, type=int, + help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus') + parser.add_argument('--epochs', default=90, type=int) + parser.add_argument('--accum_iter', default=1, type=int, + help='Accumulate gradient iterations (for increasing the effective batch size under memory constraints)') + + # Model parameters + parser.add_argument('--model', default='vit_base_patch16', type=str, metavar='MODEL', + help='Name of model to train') + + # Optimizer parameters + parser.add_argument('--weight_decay', type=float, default=0, + help='weight decay (default: 0 for linear probe following MoCo v1)') + + parser.add_argument('--lr', type=float, default=None, metavar='LR', + help='learning rate (absolute lr)') + parser.add_argument('--blr', type=float, default=0.1, metavar='LR', + help='base learning rate: absolute_lr = base_lr * total_batch_size / 256') + + parser.add_argument('--min_lr', type=float, default=0., metavar='LR', + help='lower lr bound for cyclic schedulers that hit 0') + + parser.add_argument('--warmup_epochs', type=int, default=10, metavar='N', + help='epochs to warmup LR') + + # * Finetuning params + parser.add_argument('--finetune', default=' ', + help='finetune from checkpoint') + parser.add_argument('--global_pool', action='store_true') + parser.set_defaults(global_pool=False) + parser.add_argument('--cls_token', action='store_false', dest='global_pool', + help='Use class token instead of global pool for classification') + + # Dataset parameters + parser.add_argument('--data_path_train', default=' ', type=str, + help='dataset train path') + parser.add_argument('--data_path_val', default=' ', type=str, + help='dataset val path') + parser.add_argument('--nb_classes', default=2, type=int, + help='number of the classification types') + + parser.add_argument('--output_dir', default=' ', + help='path where to save, empty for no saving') + parser.add_argument('--log_dir', default=' ', + help='path where to tensorboard log') + parser.add_argument('--device', default='cuda', + help='device to use for training / testing') + parser.add_argument('--seed', default=0, type=int) + parser.add_argument('--resume', default='', + help='resume from checkpoint') + + parser.add_argument('--start_epoch', default=0, type=int, metavar='N', + help='start epoch') + 
parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') + parser.add_argument('--dist_eval', action='store_true', default=False, + help='Enabling distributed evaluation (recommended during training for faster monitor') + parser.add_argument('--num_workers', default=20, type=int) + parser.add_argument('--pin_mem', action='store_true', + help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') + parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem') + parser.set_defaults(pin_mem=True) + + # distributed training parameters + parser.add_argument('--world_size', default=1, type=int, + help='number of distributed processes') + parser.add_argument('--local_rank', default=-1, type=int) + parser.add_argument('--dist_on_itp', action='store_true') + parser.add_argument('--dist_url', default='env://', + help='url used to set up distributed training') + parser.add_argument('--gpu_id', default=0, type=int, + help="the order of gpu") + return parser + + +def main(args): + torch.cuda.set_device(args.gpu_id) + misc.init_distributed_mode(args) + + print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__)))) + print("{}".format(args).replace(', ', ',\n')) + + device = torch.device(args.device) + + # fix the seed for reproducibility + seed = args.seed + misc.get_rank() + torch.manual_seed(seed) + np.random.seed(seed) + + cudnn.benchmark = True + + # linear probe: weak augmentation + transform_train = transforms.Compose([ + RandomResizedCrop(224, interpolation=3), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize(mean=[0.6790435, 0.5052883, 0.66902906], std= [0.19158737, 0.2039779, 0.15648715])]) + transform_val = transforms.Compose([ + transforms.Resize(256, interpolation=3), + transforms.CenterCrop(224), + transforms.ToTensor(), + transforms.Normalize(mean=[0.6790435, 0.5052883, 0.66902906], std= [0.19158737, 0.2039779, 0.15648715])]) + dataset_train = datasets.ImageFolder(args.data_path_train, transform=transform_train) + dataset_val = datasets.ImageFolder(args.data_path_val, transform=transform_val) + print(dataset_train) + print(dataset_val) + + # if True: # args.distributed: + # num_tasks = misc.get_world_size() + # global_rank = misc.get_rank() + # sampler_train = torch.utils.data.DistributedSampler( + # dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True + # ) + # print("Sampler_train = %s" % str(sampler_train)) + # if args.dist_eval: + # if len(dataset_val) % num_tasks != 0: + # print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. 
' + # 'This will slightly alter validation results as extra duplicate entries are added to achieve ' + # 'equal num of samples per-process.') + # sampler_val = torch.utils.data.DistributedSampler( + # dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=True) # shuffle=True to reduce monitor bias + # else: + # sampler_val = torch.utils.data.SequentialSampler(dataset_val) + # else: + # sampler_train = torch.utils.data.RandomSampler(dataset_train) + # sampler_val = torch.utils.data.SequentialSampler(dataset_val) + + if args.log_dir is not None and not args.eval: + os.makedirs(args.log_dir, exist_ok=True) + log_writer = SummaryWriter(log_dir=args.log_dir) + else: + log_writer = None + + data_loader_train = torch.utils.data.DataLoader( + dataset_train, + batch_size=args.batch_size, + num_workers=args.num_workers, + pin_memory=args.pin_mem, + shuffle = True, + drop_last=True, + ) + + data_loader_val = torch.utils.data.DataLoader( + dataset_val, + batch_size=args.batch_size, + num_workers=args.num_workers, + pin_memory=args.pin_mem, + drop_last=False + ) + + model = models_vit.__dict__[args.model]( + num_classes=args.nb_classes, + global_pool=args.global_pool, + ) + + if args.finetune and not args.eval: + checkpoint = torch.load(args.finetune, map_location='cpu') + + print("Load pre-trained checkpoint from: %s" % args.finetune) + checkpoint_model = checkpoint['model'] + state_dict = model.state_dict() + for k in ['head.weight', 'head.bias']: + if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape: + print(f"Removing key {k} from pretrained checkpoint") + del checkpoint_model[k] + + # interpolate position embedding + interpolate_pos_embed(model, checkpoint_model) + + # load pre-trained model + msg = model.load_state_dict(checkpoint_model, strict=False) + print(msg) + + if args.global_pool: + assert set(msg.missing_keys) == {'head.weight', 'head.bias', 'fc_norm.weight', 'fc_norm.bias'} + else: + assert set(msg.missing_keys) == {'head.weight', 'head.bias'} + + # manually initialize fc layer: following MoCo v3 + trunc_normal_(model.head.weight, std=0.01) + + # for linear prob only + # hack: revise model's head with BN + model.head = torch.nn.Sequential(torch.nn.BatchNorm1d(model.head.in_features, affine=False, eps=1e-6), model.head) + # freeze all but the head + for _, p in model.named_parameters(): + p.requires_grad = False + for _, p in model.head.named_parameters(): + p.requires_grad = True + + model.to(device) + + model_without_ddp = model + n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) + + print("Model = %s" % str(model_without_ddp)) + print('number of params (M): %.2f' % (n_parameters / 1.e6)) + + eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size() + + if args.lr is None: # only base_lr is specified + args.lr = args.blr * eff_batch_size / 256 + + print("base lr: %.2e" % (args.lr * 256 / eff_batch_size)) + print("actual lr: %.2e" % args.lr) + + print("accumulate grad iterations: %d" % args.accum_iter) + print("effective batch size: %d" % eff_batch_size) + + if args.distributed: + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) + model_without_ddp = model.module + + optimizer = LARS(model_without_ddp.head.parameters(), lr=args.lr, weight_decay=args.weight_decay) + print(optimizer) + loss_scaler = NativeScaler() + + criterion = torch.nn.CrossEntropyLoss() + + print("criterion = %s" % str(criterion)) + + misc.load_model(args=args, model_without_ddp=model_without_ddp, 
optimizer=optimizer, loss_scaler=loss_scaler)
+
+    if args.eval:
+        test_stats, auc, precision, recall, f1, specificity = evaluate(data_loader_val, model, device, args.nb_classes)
+        print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.2f}%, AUC: {auc:.2f}%, precision {precision:.2f}%, recall {recall:.2f}%, f1_score {f1:.2f}%, specificity {specificity:.2f}%")
+        exit(0)
+
+    print(f"Start training for {args.epochs} epochs")
+    start_time = time.time()
+    max_accuracy = 0.0
+    max_auc = 0.0
+    for epoch in range(args.start_epoch, args.epochs):
+        if args.distributed:
+            data_loader_train.sampler.set_epoch(epoch)
+        train_stats = train_one_epoch(
+            model, criterion, data_loader_train,
+            optimizer, device, epoch, loss_scaler,
+            max_norm=None,
+            log_writer=log_writer,
+            args=args
+        )
+        if args.output_dir:
+            misc.save_model(
+                args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
+                loss_scaler=loss_scaler, epoch=epoch)
+
+        test_stats, auc, precision, recall, f1, specificity = evaluate(data_loader_val, model, device, args.nb_classes)
+        print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.2f}%, AUC: {auc:.2f}%, precision {precision:.2f}%, recall {recall:.2f}%, f1_score {f1:.2f}%, specificity {specificity:.2f}%")
+        max_accuracy = max(max_accuracy, test_stats["acc1"])
+        max_auc = max(max_auc, auc)
+        print(f'Max accuracy: {max_accuracy:.2f}%, Max AUC: {max_auc:.2f}%')
+
+        if log_writer is not None:
+            log_writer.add_scalar('perf/test_acc1', test_stats['acc1'], epoch)
+            log_writer.add_scalar('perf/test_acc5', test_stats['acc5'], epoch)
+            log_writer.add_scalar('perf/test_loss', test_stats['loss'], epoch)
+        log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
+                     **{f'test_{k}': v for k, v in test_stats.items()},
+                     'epoch': epoch,
+                     'n_parameters': n_parameters}
+
+        if args.output_dir and misc.is_main_process():
+            if log_writer is not None:
+                log_writer.flush()
+            with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
+                f.write(json.dumps(log_stats) + "\n")
+
+    total_time = time.time() - start_time
+    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
+    print('Training time {}'.format(total_time_str))
+
+
+if __name__ == '__main__':
+    args = get_args_parser()
+    args = args.parse_args()
+    if args.output_dir:
+        Path(args.output_dir).mkdir(parents=True, exist_ok=True)
+    main(args)
 diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/main_pretrain.py b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/main_pretrain.py new file mode 100644 index 0000000000000000000000000000000000000000..400a117fb03b026cd0edcdfc630b6f569b301b53 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/main_pretrain.py @@ -0,0 +1,260 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
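+# Example invocation (an illustrative sketch; the data path is a placeholder
+# and must point to an ImageFolder-style directory of unlabeled patches):
+#
+#   python main_pretrain.py \
+#       --model gcmae_vit_base_patch16 \
+#       --data_path path/to/unlabeled_patches \
+#       --mask_ratio 0.5 \
+#       --output_dir ./output --log_dir ./output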
+# -------------------------------------------------------- +# References: +# DeiT: https://github.com/facebookresearch/deit +# BEiT: https://github.com/microsoft/unilm/tree/master/beit +# MAE: https://github.com/facebookresearch/mae +# -------------------------------------------------------- +import argparse +import datetime +import json +from random import shuffle +import numpy as np +import os +import time +from pathlib import Path + +import torch +import torch.backends.cudnn as cudnn +from torch.utils.tensorboard import SummaryWriter +import torchvision.transforms as transforms +import torchvision.datasets as datasets + +import timm + +assert timm.__version__ == "0.3.2" # version check +import timm.optim.optim_factory as optim_factory + +import util.misc as misc +from util.misc import NativeScalerWithGradNormCount as NativeScaler + +import models_gcmae + +from engine_pretrain import train_one_epoch +from lib.NCEAverage import NCEAverage +from lib.NCECriterion import NCECriterion +from test_npid import NN, kNN + +from torch.utils.data.distributed import DistributedSampler + +def get_args_parser(): + parser = argparse.ArgumentParser('GCMAE pre-training', add_help=False) + parser.add_argument('--batch_size', default=128, type=int, + help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus') + parser.add_argument('--epochs', default=80, type=int) + parser.add_argument('--accum_iter', default=1, type=int, + help='Accumulate gradient iterations (for increasing the effective batch size under memory constraints)') + + # Model parameters + parser.add_argument('--model', default='gcmae_vit_base_patch16', type=str, metavar='MODEL', + help='Name of model to train') + + parser.add_argument('--input_size', default=224, type=int, + help='images input size') + + parser.add_argument('--mask_ratio', default=0.5, type=float, + help='Masking ratio (percentage of removed patches).') + + parser.add_argument('--norm_pix_loss', action='store_true', + help='Use (per-patch) normalized pixels as targets for computing loss') + parser.set_defaults(norm_pix_loss=True) + + # Npid parameters + parser.add_argument('--low_dim', default=768, type=int, help='Low dimension') + parser.add_argument('--nce_k', default=8192, type=int, help='NCE k') + parser.add_argument('--nce_t', default=0.07, type=float, help='NCE t') + parser.add_argument('--nce_m', default=0.5, type=float, help='NCE m') + # Optimizer parameters + parser.add_argument('--weight_decay', type=float, default=0.05, + help='weight decay (default: 0.05)') + + parser.add_argument('--lr', type=float, default=None, metavar='LR', + help='learning rate (absolute lr)') + parser.add_argument('--blr', type=float, default=1e-3, metavar='LR', + help='base learning rate: absolute_lr = base_lr * total_batch_size / 256') + parser.add_argument('--min_lr', type=float, default=0., metavar='LR', + help='lower lr bound for cyclic schedulers that hit 0') + + parser.add_argument('--warmup_epochs', type=int, default=40, metavar='N', + help='epochs to warmup LR') + + # Dataset parameters + parser.add_argument('--data_path', default=' ', type=str, + help='dataset path') + + # parser.add_argument('--data_val_path', default=' ', type=str, + # help='dataset val path') + parser.add_argument('--output_dir', default=' ', + help='path where to save, empty for no saving') + parser.add_argument('--log_dir', default=' ', + help='path where to tensorboard log') + parser.add_argument('--device', default='cuda', + help='device to use for training / testing') + 
parser.add_argument('--seed', default=0, type=int) + parser.add_argument('--resume', default='', + help='resume from checkpoint') + + parser.add_argument('--start_epoch', default=0, type=int, metavar='N', + help='start epoch') + parser.add_argument('--num_workers', default=20, type=int) + parser.add_argument('--pin_mem', action='store_true', + help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') + parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem') + parser.set_defaults(pin_mem=True) + + # distributed training parameters + parser.add_argument('--world_size', default=1, type=int, + help='number of distributed processes') + parser.add_argument('--local_rank', default=-1, type=int) + parser.add_argument('--dist_on_itp', action='store_true') + parser.add_argument('--dist_url', default='env://', + help='url used to set up distributed training') + parser.add_argument('--gpu_id', default=0, type=int, + help="the order of gpu") + + # add: init weight + parser.add_argument('--init_weight_pth', default='', type=str, + help="init weight path") + + return parser + +class ImageFolderInstance(datasets.ImageFolder): + def __getitem__(self, index): + + path, target = self.imgs[index] + img = self.loader(path) + if self.transform is not None: + img = self.transform(img) + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target, index + +def main(args): + torch.cuda.set_device(args.gpu_id) + misc.init_distributed_mode(args) + print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__)))) + print("{}".format(args).replace(', ', ',\n')) + + device = torch.device(args.device) + + # fix the seed for reproducibility + seed = args.seed + misc.get_rank() + torch.manual_seed(seed) + np.random.seed(seed) + + cudnn.benchmark = True + + # simple augmentation + transform_data = transforms.Compose([ + transforms.RandomResizedCrop(args.input_size, scale=(0.2, 1.0), interpolation=3), # 3 is bicubic + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize(mean=[0.6790435, 0.5052883, 0.66902906], std=[0.19158737, 0.2039779, 0.15648715])]) + dataset_train = ImageFolderInstance(args.data_path, transform=transform_data) + + print(dataset_train) + + if args.log_dir is not None: + os.makedirs(args.log_dir, exist_ok=True) + log_writer = SummaryWriter(log_dir=args.log_dir) + else: + log_writer = None + + if args.distributed: + datasampler = DistributedSampler(dataset_train, rank=misc.get_rank(), shuffle=True, drop_last=True) + else: + datasampler = None + data_loader_train = torch.utils.data.DataLoader( + dataset_train, + batch_size=args.batch_size, + num_workers=args.num_workers, + pin_memory=args.pin_mem, + shuffle=(datasampler is None), + drop_last=(datasampler is None), + sampler=datasampler + ) + + ndata = dataset_train.__len__() + + lemniscate = NCEAverage(args.low_dim, ndata, args.nce_k, args.nce_t, args.nce_m) + criterion = NCECriterion(ndata) + + model = models_gcmae.__dict__[args.model](norm_pix_loss=args.norm_pix_loss, lemniscate=lemniscate, criterion=criterion, args= args) + + # load weight from file + if args.init_weight_pth: + print(f'Loading weight from {args.init_weight_pth}...') + init_weight = torch.load(args.init_weight_pth) + model.load_state_dict(init_weight, strict=False) + print('Weight loaded.') + + model.to(device) + + model_without_ddp = model + print("Model = %s" % str(model_without_ddp)) + + eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size() + 
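+    # Linear lr scaling rule (as in MAE): when only --blr is given, the absolute
+    # lr below is derived from the effective batch size as blr * eff_batch_size / 256.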
+ if args.lr is None: # only base_lr is specified + args.lr = args.blr * eff_batch_size / 256 + + print("base lr: %.2e" % (args.lr * 256 / eff_batch_size)) + print("actual lr: %.2e" % args.lr) + + print("accumulate grad iterations: %d" % args.accum_iter) + print("effective batch size: %d" % eff_batch_size) + + if args.distributed: + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True) + model_without_ddp = model.module + # following timm: set wd as 0 for bias and norm layers + param_groups = optim_factory.add_weight_decay(model_without_ddp, args.weight_decay) + optimizer = torch.optim.AdamW(param_groups, lr=args.lr, betas=(0.9, 0.95)) + print(optimizer) + loss_scaler = NativeScaler() + + misc.load_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler) + + print(f"Start training for {args.epochs} epochs") + start_time = time.time() + for epoch in range(args.start_epoch, args.epochs): + if args.distributed: + data_loader_train.sampler.set_epoch(epoch) + train_stats = train_one_epoch( + model, data_loader_train, + optimizer, device, epoch, loss_scaler, + log_writer=log_writer, + args=args, + lemniscate = lemniscate, + ) + + if args.output_dir and (epoch % 20 == 0 or epoch + 1 == args.epochs): + misc.save_model( + args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, + loss_scaler=loss_scaler, epoch=epoch) + + log_stats = {**{f'train_{k}': v for k, v in train_stats.items()}, + 'epoch': epoch,} + + if args.output_dir and misc.is_main_process(): + if log_writer is not None: + log_writer.flush() + with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f: + f.write(json.dumps(log_stats) + "\n") + + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('Training time {}'.format(total_time_str)) + + +if __name__ == '__main__': + args = get_args_parser() + args = args.parse_args() + if args.output_dir: + Path(args.output_dir).mkdir(parents=True, exist_ok=True) + main(args) \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/models_encoder.py b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/models_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..f890413383b6c463550a844d3885bb03ebedecc8 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/models_encoder.py @@ -0,0 +1,74 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
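+# A note on the optimizer grouping used in main_pretrain.py above: following timm,
+# optim_factory.add_weight_decay() places 1-D parameters (biases and norm weights)
+# in a weight_decay=0 group and all other parameters in the args.weight_decay group,
+# roughly equivalent to this illustrative sketch:
+#
+#   param_groups = [
+#       {'params': [p for p in model.parameters() if p.ndim <= 1], 'weight_decay': 0.},
+#       {'params': [p for p in model.parameters() if p.ndim > 1], 'weight_decay': args.weight_decay},
+#   ]
+#   optimizer = torch.optim.AdamW(param_groups, lr=args.lr, betas=(0.9, 0.95))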
+# -------------------------------------------------------- +# References: +# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm +# DeiT: https://github.com/facebookresearch/deit +# -------------------------------------------------------- + +from functools import partial + +import torch +import torch.nn as nn + +import timm.models.vision_transformer + + +class VisionTransformer(timm.models.vision_transformer.VisionTransformer): + """ Vision Transformer with support for global average pooling + """ + def __init__(self, global_pool=False, **kwargs): + super(VisionTransformer, self).__init__(**kwargs) + + self.global_pool = global_pool + if self.global_pool: + norm_layer = kwargs['norm_layer'] + embed_dim = kwargs['embed_dim'] + self.fc_norm = norm_layer(embed_dim) + + del self.norm # remove the original norm + + def forward(self, x): + B = x.shape[0] + x = self.patch_embed(x) + + cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks + x = torch.cat((cls_tokens, x), dim=1) + x = x + self.pos_embed + x = self.pos_drop(x) + + for blk in self.blocks: + x = blk(x) + + if self.global_pool: + x = x[:, 1:, :].mean(dim=1) # global pool without cls token + outcome = self.fc_norm(x) + else: + x = self.norm(x) + outcome = x[:, 0] + + return outcome + + +def vit_base_patch16(**kwargs): + model = VisionTransformer( + patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + return model + + +def vit_large_patch16(**kwargs): + model = VisionTransformer( + patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + return model + + +def vit_huge_patch14(**kwargs): + model = VisionTransformer( + patch_size=14, embed_dim=1280, depth=32, num_heads=16, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + return model \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/models_gcmae.py b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/models_gcmae.py new file mode 100644 index 0000000000000000000000000000000000000000..a92895fec6fbdb1f4f53f5f1bea37847a583d928 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/models_gcmae.py @@ -0,0 +1,298 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
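+# Usage sketch for the encoder defined above in models_encoder.py (illustrative;
+# assumes the vit_base_patch16 factory defaults): with global_pool=True the final
+# `norm` layer is removed and the patch tokens are mean-pooled through `fc_norm`
+# instead of reading the cls token.
+#
+#   import torch
+#   import models_encoder
+#   enc = models_encoder.vit_base_patch16(global_pool=True, num_classes=0)
+#   feats = enc(torch.randn(2, 3, 224, 224))  # -> [2, 768] pooled features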
+# -------------------------------------------------------- +# References: +# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm +# DeiT: https://github.com/facebookresearch/deit +# -------------------------------------------------------- + +from functools import partial +from re import X + +import torch +import torch.nn as nn + +from timm.models.vision_transformer import PatchEmbed, Block + +from util.pos_embed import get_2d_sincos_pos_embed + +from lib.normalize import Normalize + +import torch.nn.functional as F +class MaskedAutoencoderViT(nn.Module): + """ Masked Autoencoder with VisionTransformer backbone + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, + embed_dim=1024, depth=24, num_heads=16, + decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16, + mlp_ratio=4., norm_layer=nn.LayerNorm, norm_pix_loss=False, lemniscate=None, criterion=None,args=None, + iter_size = 1): + super().__init__() + self.args = args + #self.fc = nn.Sequential(nn.Linear(embed_dim, embed_dim), nn.ReLU(), nn.Linear(embed_dim, self.args.low_dim)) + + + + # -------------------------------------------------------------------------- + # MAE encoder specifics + self.patch_embed = PatchEmbed(img_size, patch_size, in_chans, embed_dim) + num_patches = self.patch_embed.num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim), requires_grad=False) # fixed sin-cos embedding + + self.blocks = nn.ModuleList([ + Block(embed_dim, num_heads, mlp_ratio, qkv_bias=True, qk_scale=None, norm_layer=norm_layer) + for i in range(depth)]) + self.norm = norm_layer(embed_dim) + # -------------------------------------------------------------------------- + self.lemniscate = lemniscate + self.criterion = criterion + self.iter_size = iter_size + self.l2norm = Normalize(2) + # -------------------------------------------------------------------------- + # MAE decoder specifics + self.decoder_embed = nn.Linear(embed_dim, decoder_embed_dim, bias=True) + + self.mask_token = nn.Parameter(torch.zeros(1, 1, decoder_embed_dim)) + + self.decoder_pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, decoder_embed_dim), requires_grad=False) # fixed sin-cos embedding + + self.decoder_blocks = nn.ModuleList([ + Block(decoder_embed_dim, decoder_num_heads, mlp_ratio, qkv_bias=True, qk_scale=None, norm_layer=norm_layer) + for i in range(decoder_depth)]) + + self.decoder_norm = norm_layer(decoder_embed_dim) + self.decoder_pred = nn.Linear(decoder_embed_dim, patch_size**2 * in_chans, bias=True) # decoder to patch + # -------------------------------------------------------------------------- + + self.norm_pix_loss = norm_pix_loss + + self.initialize_weights() + + def initialize_weights(self): + # initialization + # initialize (and freeze) pos_embed by sin-cos embedding + pos_embed = get_2d_sincos_pos_embed(self.pos_embed.shape[-1], int(self.patch_embed.num_patches**.5), cls_token=True) + self.pos_embed.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0)) + + decoder_pos_embed = get_2d_sincos_pos_embed(self.decoder_pos_embed.shape[-1], int(self.patch_embed.num_patches**.5), cls_token=True) + self.decoder_pos_embed.data.copy_(torch.from_numpy(decoder_pos_embed).float().unsqueeze(0)) + + # initialize patch_embed like nn.Linear (instead of nn.Conv2d) + w = self.patch_embed.proj.weight.data + torch.nn.init.xavier_uniform_(w.view([w.shape[0], -1])) + + # timm's trunc_normal_(std=.02) is effectively 
normal_(std=0.02) as cutoff is too big (2.) + torch.nn.init.normal_(self.cls_token, std=.02) + torch.nn.init.normal_(self.mask_token, std=.02) + + # initialize nn.Linear and nn.LayerNorm + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + # we use xavier_uniform following official JAX ViT: + torch.nn.init.xavier_uniform_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def patchify(self, imgs): + """ + imgs: (N, 3, H, W) + x: (N, L, patch_size**2 *3) + """ + p = self.patch_embed.patch_size[0] + assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % p == 0 + + h = w = imgs.shape[2] // p + x = imgs.reshape(shape=(imgs.shape[0], 3, h, p, w, p)) + x = torch.einsum('nchpwq->nhwpqc', x) + x = x.reshape(shape=(imgs.shape[0], h * w, p**2 * 3)) + return x + + def unpatchify(self, x): + """ + x: (N, L, patch_size**2 *3) + imgs: (N, 3, H, W) + """ + p = self.patch_embed.patch_size[0] + h = w = int(x.shape[1]**.5) + assert h * w == x.shape[1] + + x = x.reshape(shape=(x.shape[0], h, w, p, p, 3)) + x = torch.einsum('nhwpqc->nchpwq', x) + imgs = x.reshape(shape=(x.shape[0], 3, h * p, h * p)) + return imgs + def outside_block_fix(self, x, mask_ratio): + N, L, D = x.shape # batch, length, dim + h = w = int((L * (1 - mask_ratio))**.5) + keep = torch.zeros([N, h, w], device=x.device) + pad = nn.ConstantPad2d((3,4,3,4), 1) + mask = pad(keep).flatten(1) + return mask + + + def random_masking(self, x, mask_ratio, random = True): + """ + Perform per-sample random masking by per-sample shuffling. + Per-sample shuffling is done by argsort random noise. + x: [N, L, D], sequence + """ + N, L, D = x.shape # batch, length, dim + len_keep = int(L * (1 - mask_ratio)) + if random: + noise = torch.rand(N, L, device=x.device) # noise in [0, 1] + else: + noise = self.outside_block_fix(x, mask_ratio) + # sort noise for each sample + ids_shuffle = torch.argsort(noise, dim=1) # ascend: small is keep, large is remove + ids_restore = torch.argsort(ids_shuffle, dim=1) + + # keep the first subset + ids_keep = ids_shuffle[:, :len_keep] + x_masked = torch.gather(x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, D)) + + # generate the binary mask: 0 is keep, 1 is remove + mask = torch.ones([N, L], device=x.device) + mask[:, :len_keep] = 0 + # unshuffle to get the binary mask + mask = torch.gather(mask, dim=1, index=ids_restore) + + return x_masked, mask, ids_restore + + def forward_encoder(self, x, mask_ratio): + # embed patches + x = self.patch_embed(x) + + # add pos embed w/o cls token + x = x + self.pos_embed[:, 1:, :] + + # masking: length -> length * mask_ratio + x, mask, ids_restore = self.random_masking(x, mask_ratio, True) + + # append cls token + cls_token = self.cls_token + self.pos_embed[:, :1, :] + cls_tokens = cls_token.expand(x.shape[0], -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + + # apply Transformer blocks + for blk in self.blocks: + x = blk(x) + + x = self.norm(x) + return x, mask, ids_restore + + def forward_decoder(self, x, ids_restore): + # embed tokens + x = self.decoder_embed(x) + + # append mask tokens to sequence + mask_tokens = self.mask_token.repeat(x.shape[0], ids_restore.shape[1] + 1 - x.shape[1], 1) + x_ = torch.cat([x[:, 1:, :], mask_tokens], dim=1) # no cls token + x_ = torch.gather(x_, dim=1, index=ids_restore.unsqueeze(-1).repeat(1, 1, x.shape[2])) # unshuffle + x = torch.cat([x[:, :1, :], x_], 
dim=1) # append cls token + + # add pos embed + x = x + self.decoder_pos_embed + + # apply Transformer blocks + for blk in self.decoder_blocks: + x = blk(x) + x = self.decoder_norm(x) + + # predictor projection + x = self.decoder_pred(x) + + # remove cls token + x = x[:, 1:, :] + + return x + def forward_npid(self, x): + x = x.mean(dim=1) + #x = self.fc(x) + x = self.l2norm(x) + return x + def forward_npid_loss(self, x, index): + output = self.lemniscate(x, index) #index [256] output P(i|v) x [64, 50, 768] + loss = self.criterion(output, index) / self.iter_size + return output, loss + + def forward_loss(self, imgs, pred, mask): + """ + imgs: [N, 3, H, W] + pred: [N, L, p*p*3] + mask: [N, L], 0 is keep, 1 is remove, + """ + target = self.patchify(imgs) + if self.norm_pix_loss: + mean = target.mean(dim=-1, keepdim=True) + var = target.var(dim=-1, keepdim=True) + target = (target - mean) / (var + 1.e-6)**.5 + + loss = (pred - target) ** 2 + loss = loss.mean(dim=-1) # [N, L], mean loss per patch + + loss = (loss * mask).sum() / mask.sum() # mean loss on removed patches + return loss + + def forward(self, imgs, mask_ratio=0.75, index = None, is_train=False, mae=False, npid=False, npid_feature=False): + latent, mask, ids_restore = self.forward_encoder(imgs, mask_ratio) + #latent [256, 50, 768] + if is_train: + npid_x = self.forward_npid(latent) + output_npid, loss_npid = self.forward_npid_loss(npid_x, index) + + pred = self.forward_decoder(latent, ids_restore) # [N, L, p*p*3] + loss = self.forward_loss(imgs, pred, mask) + return loss, pred, mask, loss_npid, output_npid + elif mae: + pred = self.forward_decoder(latent, ids_restore) # [N, L, p*p*3] + loss = self.forward_loss(imgs, pred, mask) + return loss, pred, mask, None, None + elif npid: + npid_x = self.forward_npid(latent) + output_npid, loss_npid = self.forward_npid_loss(npid_x, index) + return None, None, None, loss_npid, output_npid + elif npid_feature: + npid_x = self.forward_npid(latent) + return npid_x + +def gcmae_vit_base_patch16_dec512d8b(**kwargs): + model = MaskedAutoencoderViT( + patch_size=16, embed_dim=768, depth=12, num_heads=12, + decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16, + mlp_ratio=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + return model + + +def gcmae_vit_large_patch16_dec512d8b(**kwargs): + model = MaskedAutoencoderViT( + patch_size=16, embed_dim=1024, depth=24, num_heads=16, + decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16, + mlp_ratio=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + return model + + +def gcmae_vit_huge_patch14_dec512d8b(**kwargs): + model = MaskedAutoencoderViT( + patch_size=14, embed_dim=1280, depth=32, num_heads=16, + decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16, + mlp_ratio=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + return model + + +# set recommended archs +gcmae_vit_base_patch16 = gcmae_vit_base_patch16_dec512d8b # decoder: 512 dim, 8 blocks +gcmae_vit_large_patch16 = gcmae_vit_large_patch16_dec512d8b # decoder: 512 dim, 8 blocks +gcmae_vit_huge_patch14 = gcmae_vit_huge_patch14_dec512d8b # decoder: 512 dim, 8 blocks + + + + diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/models_vit.py b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/models_vit.py new file mode 100644 index 0000000000000000000000000000000000000000..2244a17ab7ce7193d560b1f6938c22b670907a06 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/models_vit.py @@ -0,0 +1,74 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# -------------------------------------------------------- +# References: +# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm +# DeiT: https://github.com/facebookresearch/deit +# -------------------------------------------------------- + +from functools import partial + +import torch +import torch.nn as nn + +import timm.models.vision_transformer + + +class VisionTransformer(timm.models.vision_transformer.VisionTransformer): + """ Vision Transformer with support for global average pooling + """ + def __init__(self, global_pool=False, **kwargs): + super(VisionTransformer, self).__init__(**kwargs) + + self.global_pool = global_pool + if self.global_pool: + norm_layer = kwargs['norm_layer'] + embed_dim = kwargs['embed_dim'] + self.fc_norm = norm_layer(embed_dim) + + del self.norm # remove the original norm + + def forward_features(self, x): + B = x.shape[0] + x = self.patch_embed(x) + + cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks + x = torch.cat((cls_tokens, x), dim=1) + x = x + self.pos_embed + x = self.pos_drop(x) + + for blk in self.blocks: + x = blk(x) + + if self.global_pool: + x = x[:, 1:, :].mean(dim=1) # global pool without cls token + outcome = self.fc_norm(x) + else: + x = self.norm(x) + outcome = x[:, 0] + + return outcome + + +def vit_base_patch16(**kwargs): + model = VisionTransformer( + patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + return model + + +def vit_large_patch16(**kwargs): + model = VisionTransformer( + patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + return model + + +def vit_huge_patch14(**kwargs): + model = VisionTransformer( + patch_size=14, embed_dim=1280, depth=32, num_heads=16, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + return model \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/nohup.out b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/nohup.out new file mode 100644 index 0000000000000000000000000000000000000000..4fc73e466691ac13d44bd37963f83bcb3df8fc0a --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/nohup.out @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0196109994c58e9b073c1f239463dba4a9fd1f714ccb919b1f6989233284fb13 +size 10860427 diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/output/log.txt b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/output/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..de03ade4b03833522e0896d16cc7bb602abb2308 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/output/log.txt @@ -0,0 +1,20 @@ +{"train_lr": 1.249928325688073e-05, "train_loss_all": 1.6951392864893882, "train_loss_mae": 0.6232919644752364, "train_loss_npid": 10.718472986538476, "epoch": 0} +{"train_lr": 3.7499283256880734e-05, "train_loss_all": 1.2544193148886391, "train_loss_mae": 0.29345881425168946, "train_loss_npid": 9.609604887513939, "epoch": 1} +{"train_lr": 6.249928325688072e-05, "train_loss_all": 0.9934084515198381, "train_loss_mae": 0.23100289355433726, "train_loss_npid": 7.6240554602867965, "epoch": 2} +{"train_lr": 8.749928325688076e-05, "train_loss_all": 
0.8070334031836155, "train_loss_mae": 0.2147223876712314, "train_loss_npid": 5.923110032737802, "epoch": 3} +{"train_lr": 0.00011249928325688074, "train_loss_all": 0.6624781838387525, "train_loss_mae": 0.20488846144732942, "train_loss_npid": 4.575897163918259, "epoch": 4} +{"train_lr": 0.00013749928325688075, "train_loss_all": 0.5504872641466353, "train_loss_mae": 0.19673029863352487, "train_loss_npid": 3.5375695937270417, "epoch": 5} +{"train_lr": 0.00016249928325688076, "train_loss_all": 0.4680212429699001, "train_loss_mae": 0.18649238266153345, "train_loss_npid": 2.8152885437285136, "epoch": 6} +{"train_lr": 0.0001874992832568808, "train_loss_all": 0.4125429083225787, "train_loss_mae": 0.17726122452193924, "train_loss_npid": 2.352816803630339, "epoch": 7} +{"train_lr": 0.0002124992832568808, "train_loss_all": 0.3781979799766196, "train_loss_mae": 0.17158613237253295, "train_loss_npid": 2.0661184463274043, "epoch": 8} +{"train_lr": 0.00023749928325688058, "train_loss_all": 0.3560920784813822, "train_loss_mae": 0.16740265586328001, "train_loss_npid": 1.886894195021019, "epoch": 9} +{"train_lr": 0.00024795413078163535, "train_loss_all": 0.339586502899749, "train_loss_mae": 0.16270318633338454, "train_loss_npid": 1.768833134712976, "epoch": 10} +{"train_lr": 0.00023591887458797715, "train_loss_all": 0.3261361276269506, "train_loss_mae": 0.15826478182346723, "train_loss_npid": 1.678713425973413, "epoch": 11} +{"train_lr": 0.0002130261061324063, "train_loss_all": 0.31538585492606286, "train_loss_mae": 0.1542001661195189, "train_loss_npid": 1.6118568577350827, "epoch": 12} +{"train_lr": 0.00018151672909465076, "train_loss_all": 0.3060923837296186, "train_loss_mae": 0.15042925238429886, "train_loss_npid": 1.5566312829767346, "epoch": 13} +{"train_lr": 0.0001444751008379059, "train_loss_all": 0.29751537771673375, "train_loss_mae": 0.14653637044798207, "train_loss_npid": 1.5097900425212099, "epoch": 14} +{"train_lr": 0.00010552711402013923, "train_loss_all": 0.28930359132622085, "train_loss_mae": 0.14255208674809733, "train_loss_npid": 1.467515015848186, "epoch": 15} +{"train_lr": 6.848526895765559e-05, "train_loss_all": 0.2819806246206574, "train_loss_mae": 0.13879279353049234, "train_loss_npid": 1.43187828023636, "epoch": 16} +{"train_lr": 3.6975479530882305e-05, "train_loss_all": 0.27513516249493997, "train_loss_mae": 0.13510516505397924, "train_loss_npid": 1.400299943135966, "epoch": 17} +{"train_lr": 1.408214347052291e-05, "train_loss_all": 0.2700371737241608, "train_loss_mae": 0.13246216934490437, "train_loss_npid": 1.3757500144456505, "epoch": 18} +{"train_lr": 2.046220017417465e-06, "train_loss_all": 0.26699303928762674, "train_loss_mae": 0.13090999836503708, "train_loss_npid": 1.3608303785324096, "epoch": 19} diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/pretrain.sh b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/pretrain.sh new file mode 100644 index 0000000000000000000000000000000000000000..af15be241dc8b494ffe8c8939b4a01a395a321a9 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/pretrain.sh @@ -0,0 +1,55 @@ +#!/bin/bash +# ps -ef | grep pretrain | awk '{print $2}' |xargs kill + +# Training settings +pretrain_model="timm" +dataset="All" +model_weights="/root/autodl-tmp/model_base/ViT_b16_224_Imagenet.pth" + +# Init params +data_path="/root/autodl-tmp/datasets/${dataset}" +model_name="ViT_b16_224_timm_GCMAE_ALL_80.pth" +checkpoint_path="/root/autodl-tmp/LSQ/checkpoint/${pretrain_model}" +save_weight_path="/root/autodl-tmp/LSQ/model_saved/" 
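+# Run arithmetic (assuming main_pretrain.py's default accum_iter of 1): 4 processes
+# x batch_size 64 gives an effective batch size of 256, so the actual lr resolves to
+# blr * 256 / 256 = 1e-3; mask_ratio 0.5 keeps half of the patches visible, milder
+# than the 0.75 default of the MAE-style forward pass.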
+tensorboard_path="/root/tf-logs/" + +# Training. Save checkpoint every 20 epochs. +# The checkpoint and backbone model will be available under checkpoint_path folder. +set -e + +# train +python -u -m torch.distributed.launch \ + --nproc_per_node 4 \ + main_pretrain.py \ + --data_path $data_path \ + --output_dir $checkpoint_path \ + --log_dir $tensorboard_path \ + --batch_size 64 \ + --model gcmae_vit_base_patch16 \ + --norm_pix_loss \ + --mask_ratio 0.5 \ + --epochs 80 \ + --warmup_epochs 40 \ + --blr 1e-3 --weight_decay 0.05 \ + --low_dim 768 \ + --nce_k 8192 \ + --nce_t 0.07 \ + --nce_m 0.5 \ + --init_weight_pth $model_weights + +# extract & save model +python -u load_vit_from_ckpt.py \ + --basic-weight ${model_weights} \ + --checkpoint ${checkpoint_path}/checkpoint-79.pth \ + --save-to $save_weight_path \ + --save-name $model_name \ + --num-classes 2 + +set +e + +# # packup checkpoints +# nohup zip GCMAE_2.zip checkpoint-0.pth & +# nohup zip GCMAE_3.zip checkpoint-20.pth & +# nohup zip GCMAE_4.zip checkpoint-40.pth & +# nohup zip GCMAE_5.zip checkpoint-60.pth & +# nohup zip GCMAE_6.zip checkpoint-79.pth & \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/requirements.txt b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..4285171bad0b03f89a37ef1320178d012d7abd46 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/requirements.txt @@ -0,0 +1,13 @@ +matplotlib +tensorboardx +opencv-python +pandas +Pillow +scikit-image +scikit-learn +scipy +seaborn +sentry-sdk +urllib3 +tensorboard +tqdm \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/submitit_finetune.py b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/submitit_finetune.py new file mode 100644 index 0000000000000000000000000000000000000000..cce5883bccac0329ca67f99d6a56219afe31425d --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/submitit_finetune.py @@ -0,0 +1,131 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# -------------------------------------------------------- +# A script to run multinode training with submitit. +# -------------------------------------------------------- + +import argparse +import os +import uuid +from pathlib import Path + +import main_finetune as classification +import submitit + + +def parse_args(): + classification_parser = classification.get_args_parser() + parser = argparse.ArgumentParser("Submitit for MAE finetune", parents=[classification_parser]) + parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node") + parser.add_argument("--nodes", default=2, type=int, help="Number of nodes to request") + parser.add_argument("--timeout", default=4320, type=int, help="Duration of the job") + parser.add_argument("--job_dir", default="", type=str, help="Job dir. 
Leave empty for automatic.") + + parser.add_argument("--partition", default="learnfair", type=str, help="Partition where to submit") + parser.add_argument("--use_volta32", action='store_true', help="Request 32G V100 GPUs") + parser.add_argument('--comment', default="", type=str, help="Comment to pass to scheduler") + return parser.parse_args() + + +def get_shared_folder() -> Path: + user = os.getenv("USER") + if Path("/checkpoint/").is_dir(): + p = Path(f"/checkpoint/{user}/experiments") + p.mkdir(exist_ok=True) + return p + raise RuntimeError("No shared folder available") + + +def get_init_file(): + # Init file must not exist, but it's parent dir must exist. + os.makedirs(str(get_shared_folder()), exist_ok=True) + init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init" + if init_file.exists(): + os.remove(str(init_file)) + return init_file + + +class Trainer(object): + def __init__(self, args): + self.args = args + + def __call__(self): + import main_finetune as classification + + self._setup_gpu_args() + classification.main(self.args) + + def checkpoint(self): + import os + import submitit + + self.args.dist_url = get_init_file().as_uri() + checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth") + if os.path.exists(checkpoint_file): + self.args.resume = checkpoint_file + print("Requeuing ", self.args) + empty_trainer = type(self)(self.args) + return submitit.helpers.DelayedSubmission(empty_trainer) + + def _setup_gpu_args(self): + import submitit + from pathlib import Path + + job_env = submitit.JobEnvironment() + self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id))) + self.args.log_dir = self.args.output_dir + self.args.gpu = job_env.local_rank + self.args.rank = job_env.global_rank + self.args.world_size = job_env.num_tasks + print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}") + + +def main(): + args = parse_args() + if args.job_dir == "": + args.job_dir = get_shared_folder() / "%j" + + # Note that the folder will depend on the job_id, to easily track experiments + executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30) + + num_gpus_per_node = args.ngpus + nodes = args.nodes + timeout_min = args.timeout + + partition = args.partition + kwargs = {} + if args.use_volta32: + kwargs['slurm_constraint'] = 'volta32gb' + if args.comment: + kwargs['slurm_comment'] = args.comment + + executor.update_parameters( + mem_gb=40 * num_gpus_per_node, + gpus_per_node=num_gpus_per_node, + tasks_per_node=num_gpus_per_node, # one task per GPU + cpus_per_task=10, + nodes=nodes, + timeout_min=timeout_min, + # Below are cluster dependent parameters + slurm_partition=partition, + slurm_signal_delay_s=120, + **kwargs + ) + + executor.update_parameters(name="mae") + + args.dist_url = get_init_file().as_uri() + args.output_dir = args.job_dir + + trainer = Trainer(args) + job = executor.submit(trainer) + + # print("Submitted job_id:", job.job_id) + print(job.job_id) + + +if __name__ == "__main__": + main() diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/submitit_linprobe.py b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/submitit_linprobe.py new file mode 100644 index 0000000000000000000000000000000000000000..571186d3de27c68933a5a009206d793840f51da6 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/submitit_linprobe.py @@ -0,0 +1,131 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
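+# Requeue flow shared by the submitit drivers in this folder (a sketch of standard
+# submitit behaviour, not project-specific code): on preemption or timeout, submitit
+# invokes Trainer.checkpoint(), which points args.resume at the latest checkpoint.pth
+# and resubmits a fresh Trainer.
+#
+#   job = executor.submit(Trainer(args))   # initial submission
+#   # on SIGUSR1 / timeout:
+#   #   Trainer.checkpoint() -> submitit.helpers.DelayedSubmission(Trainer(args))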
+ +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# -------------------------------------------------------- +# A script to run multinode training with submitit. +# -------------------------------------------------------- + +import argparse +import os +import uuid +from pathlib import Path + +import main_linprobe as classification +import submitit + + +def parse_args(): + classification_parser = classification.get_args_parser() + parser = argparse.ArgumentParser("Submitit for MAE linear probe", parents=[classification_parser]) + parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node") + parser.add_argument("--nodes", default=2, type=int, help="Number of nodes to request") + parser.add_argument("--timeout", default=4320, type=int, help="Duration of the job") + parser.add_argument("--job_dir", default="", type=str, help="Job dir. Leave empty for automatic.") + + parser.add_argument("--partition", default="learnfair", type=str, help="Partition where to submit") + parser.add_argument("--use_volta32", action='store_true', help="Request 32G V100 GPUs") + parser.add_argument('--comment', default="", type=str, help="Comment to pass to scheduler") + return parser.parse_args() + + +def get_shared_folder() -> Path: + user = os.getenv("USER") + if Path("/checkpoint/").is_dir(): + p = Path(f"/checkpoint/{user}/experiments") + p.mkdir(exist_ok=True) + return p + raise RuntimeError("No shared folder available") + + +def get_init_file(): + # Init file must not exist, but it's parent dir must exist. + os.makedirs(str(get_shared_folder()), exist_ok=True) + init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init" + if init_file.exists(): + os.remove(str(init_file)) + return init_file + + +class Trainer(object): + def __init__(self, args): + self.args = args + + def __call__(self): + import main_linprobe as classification + + self._setup_gpu_args() + classification.main(self.args) + + def checkpoint(self): + import os + import submitit + + self.args.dist_url = get_init_file().as_uri() + checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth") + if os.path.exists(checkpoint_file): + self.args.resume = checkpoint_file + print("Requeuing ", self.args) + empty_trainer = type(self)(self.args) + return submitit.helpers.DelayedSubmission(empty_trainer) + + def _setup_gpu_args(self): + import submitit + from pathlib import Path + + job_env = submitit.JobEnvironment() + self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id))) + self.args.log_dir = self.args.output_dir + self.args.gpu = job_env.local_rank + self.args.rank = job_env.global_rank + self.args.world_size = job_env.num_tasks + print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}") + + +def main(): + args = parse_args() + if args.job_dir == "": + args.job_dir = get_shared_folder() / "%j" + + # Note that the folder will depend on the job_id, to easily track experiments + executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30) + + num_gpus_per_node = args.ngpus + nodes = args.nodes + timeout_min = args.timeout + + partition = args.partition + kwargs = {} + if args.use_volta32: + kwargs['slurm_constraint'] = 'volta32gb' + if args.comment: + kwargs['slurm_comment'] = args.comment + + executor.update_parameters( + mem_gb=40 * num_gpus_per_node, + gpus_per_node=num_gpus_per_node, + tasks_per_node=num_gpus_per_node, # one task per GPU + 
cpus_per_task=10, + nodes=nodes, + timeout_min=timeout_min, + # Below are cluster dependent parameters + slurm_partition=partition, + slurm_signal_delay_s=120, + **kwargs + ) + + executor.update_parameters(name="mae") + + args.dist_url = get_init_file().as_uri() + args.output_dir = args.job_dir + + trainer = Trainer(args) + job = executor.submit(trainer) + + # print("Submitted job_id:", job.job_id) + print(job.job_id) + + +if __name__ == "__main__": + main() diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/submitit_pretrain.py b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/submitit_pretrain.py new file mode 100644 index 0000000000000000000000000000000000000000..384b8ad0e65359b656e104df664c4d88711ee49d --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/submitit_pretrain.py @@ -0,0 +1,131 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# -------------------------------------------------------- +# A script to run multinode training with submitit. +# -------------------------------------------------------- + +import argparse +import os +import uuid +from pathlib import Path + +import main_pretrain as trainer +import submitit + + +def parse_args(): + trainer_parser = trainer.get_args_parser() + parser = argparse.ArgumentParser("Submitit for MAE pretrain", parents=[trainer_parser]) + parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node") + parser.add_argument("--nodes", default=2, type=int, help="Number of nodes to request") + parser.add_argument("--timeout", default=4320, type=int, help="Duration of the job") + parser.add_argument("--job_dir", default="", type=str, help="Job dir. Leave empty for automatic.") + + parser.add_argument("--partition", default="learnfair", type=str, help="Partition where to submit") + parser.add_argument("--use_volta32", action='store_true', help="Request 32G V100 GPUs") + parser.add_argument('--comment', default="", type=str, help="Comment to pass to scheduler") + return parser.parse_args() + + +def get_shared_folder() -> Path: + user = os.getenv("USER") + if Path("/checkpoint/").is_dir(): + p = Path(f"/checkpoint/{user}/experiments") + p.mkdir(exist_ok=True) + return p + raise RuntimeError("No shared folder available") + + +def get_init_file(): + # Init file must not exist, but it's parent dir must exist. 
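+    # The returned path becomes a file:// rendezvous for torch.distributed: main()
+    # below sets args.dist_url = get_init_file().as_uri(), and every process must
+    # see this same shared-filesystem path for the init method to work.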
+ os.makedirs(str(get_shared_folder()), exist_ok=True) + init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init" + if init_file.exists(): + os.remove(str(init_file)) + return init_file + + +class Trainer(object): + def __init__(self, args): + self.args = args + + def __call__(self): + import main_pretrain as trainer + + self._setup_gpu_args() + trainer.main(self.args) + + def checkpoint(self): + import os + import submitit + + self.args.dist_url = get_init_file().as_uri() + checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth") + if os.path.exists(checkpoint_file): + self.args.resume = checkpoint_file + print("Requeuing ", self.args) + empty_trainer = type(self)(self.args) + return submitit.helpers.DelayedSubmission(empty_trainer) + + def _setup_gpu_args(self): + import submitit + from pathlib import Path + + job_env = submitit.JobEnvironment() + self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id))) + self.args.log_dir = self.args.output_dir + self.args.gpu = job_env.local_rank + self.args.rank = job_env.global_rank + self.args.world_size = job_env.num_tasks + print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}") + + +def main(): + args = parse_args() + if args.job_dir == "": + args.job_dir = get_shared_folder() / "%j" + + # Note that the folder will depend on the job_id, to easily track experiments + executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30) + + num_gpus_per_node = args.ngpus + nodes = args.nodes + timeout_min = args.timeout + + partition = args.partition + kwargs = {} + if args.use_volta32: + kwargs['slurm_constraint'] = 'volta32gb' + if args.comment: + kwargs['slurm_comment'] = args.comment + + executor.update_parameters( + mem_gb=40 * num_gpus_per_node, + gpus_per_node=num_gpus_per_node, + tasks_per_node=num_gpus_per_node, # one task per GPU + cpus_per_task=10, + nodes=nodes, + timeout_min=timeout_min, # max is 60 * 72 + # Below are cluster dependent parameters + slurm_partition=partition, + slurm_signal_delay_s=120, + **kwargs + ) + + executor.update_parameters(name="mae") + + args.dist_url = get_init_file().as_uri() + args.output_dir = args.job_dir + + trainer = Trainer(args) + job = executor.submit(trainer) + + # print("Submitted job_id:", job.job_id) + print(job.job_id) + + +if __name__ == "__main__": + main() diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/test_npid.py b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/test_npid.py new file mode 100644 index 0000000000000000000000000000000000000000..91c9711b060103e935bf8b6f906cd1afb26b8aa9 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/test_npid.py @@ -0,0 +1,138 @@ +import torch +import time +from lib.utils import AverageMeter + + +def NN(epoch, net, lemniscate, trainloader, testloader, recompute_memory=0): + net.eval() + net_time = AverageMeter() + cls_time = AverageMeter() + losses = AverageMeter() + correct = 0. 
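+    # 1-NN evaluation sketch: lemniscate.memory holds one L2-normalised feature per
+    # training image, so trainFeatures below is [low_dim, ndata]; each test feature
+    # is matched by dot product and inherits the label of its nearest bank entry:
+    #
+    #   dist = torch.mm(features, trainFeatures)  # [B, ndata] similarities
+    #   yd, yi = dist.topk(1, dim=1)              # nearest neighbour per test image
+    #   pred = trainLabels[yi.view(-1)]           # label transfer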
+ total = 0 + testsize = testloader.dataset.__len__() + + trainFeatures = lemniscate.memory.t() + if hasattr(trainloader.dataset, 'imgs'): + trainLabels = torch.LongTensor([y for (p, y) in trainloader.dataset.imgs]).cuda() + else: + trainLabels = torch.LongTensor(trainloader.dataset.train_labels).cuda() + + if recompute_memory: + transform_bak = trainloader.dataset.transform + trainloader.dataset.transform = testloader.dataset.transform + temploader = torch.utils.data.DataLoader(trainloader.dataset, batch_size=300, shuffle=False, num_workers=20) + for batch_idx, (inputs, targets, indexes) in enumerate(temploader): + targets = targets.cuda(non_blocking = True) + inputs = inputs.cuda(non_blocking = True) + batchSize = inputs.size(0) + features = net(inputs, npid_feature = True) + trainFeatures[:, batch_idx*batchSize:batch_idx*batchSize+batchSize] = features.data.t() + trainLabels = torch.LongTensor(temploader.dataset.targets).cuda() + trainloader.dataset.transform = transform_bak + + end = time.time() + with torch.no_grad(): + for batch_idx, (inputs, targets, indexes) in enumerate(testloader): + targets = targets.cuda(non_blocking = True) + batchSize = inputs.size(0) + inputs = inputs.cuda(non_blocking = True) + features = net(inputs, npid_feature = True) + net_time.update(time.time() - end) + end = time.time() + + dist = torch.mm(features, trainFeatures) + + yd, yi = dist.topk(1, dim=1, largest=True, sorted=True) + candidates = trainLabels.view(1,-1).expand(batchSize, -1) + retrieval = torch.gather(candidates, 1, yi) + + retrieval = retrieval.narrow(1, 0, 1).clone().view(-1) + yd = yd.narrow(1, 0, 1) + + total += targets.size(0) + correct += retrieval.eq(targets.data).sum().item() + + cls_time.update(time.time() - end) + end = time.time() + + print('Test [{}/{}]\t' + 'Net Time {net_time.val:.3f} ({net_time.avg:.3f})\t' + 'Cls Time {cls_time.val:.3f} ({cls_time.avg:.3f})\t' + 'Top1: {:.2f}'.format( + total, testsize, correct*100./total, net_time=net_time, cls_time=cls_time)) + + return correct/total + +def kNN(epoch, net, lemniscate, trainloader, testloader, K, sigma, recompute_memory=0): + net.eval() + net_time = AverageMeter() + cls_time = AverageMeter() + total = 0 + testsize = testloader.dataset.__len__() + + trainFeatures = lemniscate.memory.t() + if hasattr(trainloader.dataset, 'imgs'): + trainLabels = torch.LongTensor([y for (p, y) in trainloader.dataset.imgs]).cuda() + else: + trainLabels = torch.LongTensor(trainloader.dataset.train_labels).cuda() + C = trainLabels.max() + 1 + + if recompute_memory: + transform_bak = trainloader.dataset.transform + trainloader.dataset.transform = testloader.dataset.transform + temploader = torch.utils.data.DataLoader(trainloader.dataset, batch_size=300, shuffle=False, num_workers=20) + for batch_idx, (inputs, targets, indexes) in enumerate(temploader): + targets = targets.cuda(non_blocking = True) + inputs = inputs.cuda(non_blocking = True) + batchSize = inputs.size(0) + features = net(inputs, npid_feature = True) + trainFeatures[:, batch_idx*batchSize:batch_idx*batchSize+batchSize] = features.data.t() + trainLabels = torch.LongTensor(temploader.dataset.targets).cuda() + trainloader.dataset.transform = transform_bak + + top1 = 0. + top5 = 0. 
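+    # Weighted kNN sketch (the scheme from NPID / instance discrimination): take the
+    # K most similar memory-bank entries, weight each neighbour's one-hot label by
+    # exp(similarity / sigma), and sum the weights per class before ranking:
+    #
+    #   w = (yd / sigma).exp()                                   # [B, K] weights
+    #   probs = (one_hot(retrieval, C) * w[..., None]).sum(1)    # [B, C] class scores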
+    end = time.time()
+    with torch.no_grad():
+        retrieval_one_hot = torch.zeros(K, C).cuda()  # [200, 2]
+        for batch_idx, (inputs, targets, indexes) in enumerate(testloader):
+            end = time.time()
+            targets = targets.cuda(non_blocking=True)
+            inputs = inputs.cuda(non_blocking=True)
+            batchSize = inputs.size(0)
+            features = net(inputs, npid_feature=True)  # [128, 768]
+            net_time.update(time.time() - end)
+            end = time.time()
+
+            dist = torch.mm(features, trainFeatures)  # [128, 22000]
+
+            yd, yi = dist.topk(K, dim=1, largest=True, sorted=True)  # [128, 200]
+            candidates = trainLabels.view(1, -1).expand(batchSize, -1)  # [128, 22000]
+            retrieval = torch.gather(candidates, 1, yi)  # [128, 200]
+
+            retrieval_one_hot.resize_(batchSize * K, C).zero_()  # [25600, 2]
+            retrieval_one_hot.scatter_(1, retrieval.view(-1, 1), 1)
+            yd_transform = yd.clone().div_(sigma).exp_()
+            probs = torch.sum(torch.mul(retrieval_one_hot.view(batchSize, -1, C), yd_transform.view(batchSize, -1, 1)), 1)  # [128, 2]
+            _, predictions = probs.sort(1, True)  # accumulate the weighted votes per class over the top-K neighbours, then rank classes by the total
+
+            # Find which predictions match the target
+            correct = predictions.eq(targets.data.view(-1, 1))
+            cls_time.update(time.time() - end)
+
+            top1 = top1 + correct.narrow(1, 0, 1).sum().item()
+            top5 = top5 + correct.narrow(1, 0, 5).sum().item()  # requires K >= 5
+
+            total += targets.size(0)
+
+            print('Test [{}/{}]\t'
+                  'Net Time {net_time.val:.3f} ({net_time.avg:.3f})\t'
+                  'Cls Time {cls_time.val:.3f} ({cls_time.avg:.3f})\t'
+                  'Top1: {:.2f} Top5: {:.2f}'.format(
+                total, testsize, top1*100./total, top5*100./total, net_time=net_time, cls_time=cls_time))
+
+    print(top1*100./total)
+
+    return top1/total
+
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/tsne.py b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/tsne.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ee864a777f61b039147cced9ba48e22f10afb81
--- /dev/null
+++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/tsne.py
@@ -0,0 +1,183 @@
+import argparse
+from random import shuffle
+import numpy as np
+import os
+from pathlib import Path
+
+import torch
+import torch.backends.cudnn as cudnn
+import torchvision.transforms as transforms
+import torchvision.datasets as datasets
+
+import timm
+
+assert timm.__version__ == "0.3.2"  # version check
+from timm.models.layers import trunc_normal_
+
+import util.misc as misc
+from util.pos_embed import interpolate_pos_embed
+import models_encoder
+from sklearn.manifold import TSNE
+import matplotlib.pyplot as plt
+
+
+def get_args_parser():
+    parser = argparse.ArgumentParser('GCMAE feature representation visualization', add_help=False)
+    # Model parameters
+    parser.add_argument('--model', default='vit_base_patch16', type=str, metavar='MODEL',
+                        help='Name of model to train')
+
+    parser.add_argument('--batch_size', default=128, type=int,
+                        help='batch size')
+    # * Finetuning params
+    parser.add_argument('--random', default=False,
+                        help='random init only')
+    ### mae
+    # camelyon/pre
+    # nctcrc/pre
+    ### gcmae
+    # camelyon/pre
+    # nctcrc/pre
+    parser.add_argument('--finetune', default='',
+                        help='finetune from checkpoint')
+    parser.add_argument('--save_path', default='')
+    parser.add_argument('--data_path_val', default='', type=str,
+                        help='dataset val path')
+
+    parser.add_argument('--global_pool', action='store_true')
+    parser.set_defaults(global_pool=True)
+    parser.add_argument('--cls_token', action='store_false', dest='global_pool',
+                        help='Use class token instead of global pool for classification')
+
+    parser.add_argument('--device', default='cuda',
+                        help='device
to use for training / testing') + parser.add_argument('--seed', default=0, type=int) + + parser.add_argument('--num_workers', default=20, type=int) + parser.add_argument('--pin_mem', action='store_true', + help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') + parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem') + parser.set_defaults(pin_mem=True) + + # distributed training parameters + parser.add_argument('--gpu_id', default=0, type=int, + help="the order of gpu") + return parser +def main(args): + torch.cuda.set_device(args.gpu_id) + + print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__)))) + print("{}".format(args).replace(', ', ',\n')) + + device = torch.device(args.device) + + # fix the seed for reproducibility + seed = args.seed + misc.get_rank() + torch.manual_seed(seed) + np.random.seed(seed) + + cudnn.benchmark = True + + # weak augmentation + transform_val = transforms.Compose([ + transforms.Resize(256, interpolation=3), + transforms.CenterCrop(224), + transforms.ToTensor(), + transforms.Normalize(mean=[0.6790435, 0.5052883, 0.66902906], std= [0.19158737, 0.2039779, 0.15648715])]) + + dataset_val = datasets.ImageFolder(args.data_path_val, transform=transform_val) + print(dataset_val) + + + data_loader_val = torch.utils.data.DataLoader( + dataset_val, + batch_size=args.batch_size, + num_workers=args.num_workers, + pin_memory=args.pin_mem, + drop_last=False + ) + + model = models_encoder.__dict__[args.model]( + global_pool=args.global_pool, + ) + + if args.finetune and not args.random: + checkpoint = torch.load(args.finetune, map_location='cpu') + + print("Load pre-trained checkpoint from: %s" % args.finetune) + checkpoint_model = checkpoint['model'] + state_dict = model.state_dict() + for k in ['head.weight', 'head.bias']: + if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape: + print(f"Removing key {k} from pretrained checkpoint") + del checkpoint_model[k] + + # interpolate position embedding + interpolate_pos_embed(model, checkpoint_model) + + # load pre-trained model + msg = model.load_state_dict(checkpoint_model, strict=False) + print(msg) + + if args.global_pool: + assert set(msg.missing_keys) == {'head.weight', 'head.bias', 'fc_norm.weight', 'fc_norm.bias'} + else: + assert set(msg.missing_keys) == {'head.weight', 'head.bias'} + + # manually initialize fc layer: following MoCo v3 + trunc_normal_(model.head.weight, std=0.01) + + # for linear prob only + # hack: revise model's head with BN + model.head = torch.nn.Sequential(torch.nn.BatchNorm1d(model.head.in_features, affine=False, eps=1e-6), model.head) + # freeze all but the head + for _, p in model.named_parameters(): + p.requires_grad = False + for _, p in model.head.named_parameters(): + p.requires_grad = True + + model.to(device) + evaluate(data_loader_val, model, device) + +def evaluate(data_loader, model, device): + t = TSNE(n_components=2, init='pca', random_state=0, perplexity=30, n_iter=5000) + + metric_logger = misc.MetricLogger(delimiter=" ") + header = 'Test:' + + # switch to evaluation mode + model.eval() + output_full = [] + target_full = [] + for batch in metric_logger.log_every(data_loader, 10, header): + images = batch[0] + target = batch[-1] + images = images.to(device, non_blocking=True) + target = target.to(device, non_blocking=True) + + # compute output + with torch.cuda.amp.autocast(): + output = model(images) + output_full += output.cpu().numpy().tolist() + target_full += target.cpu().numpy().tolist() + 
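+    # After the loop, output_full holds one pooled feature per image; fit_transform
+    # maps them to 2-D points (note that `t` is rebound from the TSNE estimator to
+    # the embedding array), and the min-max step below rescales both axes into
+    # [0, 1] so the labelled scatter fills the figure whatever the raw t-SNE scale.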
output_full = np.array(output_full)
+    target_full = np.array(target_full)
+    t = t.fit_transform(output_full)
+
+    x_min, x_max = t.min(0), t.max(0)
+    print("x_min:{}\nx_max:{}".format(x_min, x_max))
+    X_norm = (t - x_min) / (x_max - x_min)
+    print("X_norm shape:{}".format(X_norm.shape))
+
+    plt.figure(figsize=(16, 16))
+    for i in range(X_norm.shape[0]):
+        plt.text(X_norm[i, 0], X_norm[i, 1], str(target_full[i]), color=plt.cm.Set1(target_full[i]), fontdict={'weight': 'bold', 'size': 9})
+    plt.xticks([])
+    plt.yticks([])
+    plt.savefig(args.save_path)
+    plt.show()
+
+
+if __name__ == '__main__':
+    args = get_args_parser()
+    args = args.parse_args()
+    main(args)
\ No newline at end of file
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/util/__pycache__/lr_sched.cpython-38.pyc b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/util/__pycache__/lr_sched.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e8f29661d0c1408023371be5037f4838661f2171
Binary files /dev/null and b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/util/__pycache__/lr_sched.cpython-38.pyc differ
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/util/__pycache__/misc.cpython-38.pyc b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/util/__pycache__/misc.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3cce271f36485e84a65a894adab623904c38d95f
Binary files /dev/null and b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/util/__pycache__/misc.cpython-38.pyc differ
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/util/__pycache__/pos_embed.cpython-38.pyc b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/util/__pycache__/pos_embed.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7636cc26f471927dfbda16d20fb1b9e6eb461577
Binary files /dev/null and b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/util/__pycache__/pos_embed.cpython-38.pyc differ
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/util/crop.py b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/util/crop.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb2d97faf5156231c27d599b7799fef96e2cab27
--- /dev/null
+++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/util/crop.py
@@ -0,0 +1,42 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import math
+
+import torch
+
+from torchvision import transforms
+from torchvision.transforms import functional as F
+
+
+class RandomResizedCrop(transforms.RandomResizedCrop):
+    """
+    RandomResizedCrop for matching TF/TPU implementation: no for-loop is used.
+    This may lead to results that differ from torchvision's version.
+ Following BYOL's TF code: + https://github.com/deepmind/deepmind-research/blob/master/byol/utils/dataset.py#L206 + """ + @staticmethod + def get_params(img, scale, ratio): + width, height = F.get_image_size(img) + area = height * width + + target_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item() + log_ratio = torch.log(torch.tensor(ratio)) + aspect_ratio = torch.exp( + torch.empty(1).uniform_(log_ratio[0], log_ratio[1]) + ).item() + + w = int(round(math.sqrt(target_area * aspect_ratio))) + h = int(round(math.sqrt(target_area / aspect_ratio))) + + w = min(w, width) + h = min(h, height) + + i = torch.randint(0, height - h + 1, size=(1,)).item() + j = torch.randint(0, width - w + 1, size=(1,)).item() + + return i, j, h, w \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/util/datasets.py b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/util/datasets.py new file mode 100644 index 0000000000000000000000000000000000000000..5abc8dc22a968378c6e062ff0f2e7bec11bc1f8a --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/util/datasets.py @@ -0,0 +1,67 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# -------------------------------------------------------- +# References: +# DeiT: https://github.com/facebookresearch/deit +# -------------------------------------------------------- + +import os +import PIL + +from torchvision import datasets, transforms + +from timm.data import create_transform +from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD + + +def build_dataset(is_train, args): + transform = build_transform(is_train, args) + + root = os.path.join(args.data_path, 'train' if is_train else 'test') + dataset = datasets.ImageFolder(root, transform=transform) + + print(dataset) + + return dataset + + +def build_transform(is_train, args): + # mean = IMAGENET_DEFAULT_MEAN + # std = IMAGENET_DEFAULT_STD + wsi_mean = (0.6790435, 0.5052883, 0.66902906) + wsi_std = (0.19158737, 0.2039779, 0.15648715) + # train transform + if is_train: + # this should always dispatch to transforms_imagenet_train + transform = create_transform( + input_size=args.input_size, + is_training=True, + color_jitter=args.color_jitter, + auto_augment=args.aa, + interpolation='bicubic', + re_prob=args.reprob, + re_mode=args.remode, + re_count=args.recount, + mean=wsi_mean, + std=wsi_std, + ) + return transform + + # eval transform + t = [] + if args.input_size <= 224: + crop_pct = 224 / 256 + else: + crop_pct = 1.0 + size = int(args.input_size / crop_pct) + t.append( + transforms.Resize(size, interpolation=PIL.Image.BICUBIC), # to maintain same ratio w.r.t. 224 images + ) + t.append(transforms.CenterCrop(args.input_size)) + + t.append(transforms.ToTensor()) + t.append(transforms.Normalize(wsi_mean, wsi_std)) + return transforms.Compose(t) diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/util/lars.py b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/util/lars.py new file mode 100644 index 0000000000000000000000000000000000000000..509c5f65b7f68423343121d5676d05ce32d5a6c0 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/util/lars.py @@ -0,0 +1,47 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
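+# The update rule implemented below, per parameter tensor p with gradient g (weight
+# decay and the trust ratio apply only when p.ndim > 1):
+#
+#   g  <- g + weight_decay * p
+#   q  <- trust_coefficient * ||p|| / ||g||   (q = 1 if either norm is 0)
+#   mu <- momentum * mu + q * g
+#   p  <- p - lr * mu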
+# -------------------------------------------------------- +# LARS optimizer, implementation from MoCo v3: +# https://github.com/facebookresearch/moco-v3 +# -------------------------------------------------------- + +import torch + + +class LARS(torch.optim.Optimizer): + """ + LARS optimizer, no rate scaling or weight decay for parameters <= 1D. + """ + def __init__(self, params, lr=0, weight_decay=0, momentum=0.9, trust_coefficient=0.001): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum, trust_coefficient=trust_coefficient) + super().__init__(params, defaults) + + @torch.no_grad() + def step(self): + for g in self.param_groups: + for p in g['params']: + dp = p.grad + + if dp is None: + continue + + if p.ndim > 1: # if not normalization gamma/beta or bias + dp = dp.add(p, alpha=g['weight_decay']) + param_norm = torch.norm(p) + update_norm = torch.norm(dp) + one = torch.ones_like(param_norm) + q = torch.where(param_norm > 0., + torch.where(update_norm > 0, + (g['trust_coefficient'] * param_norm / update_norm), one), + one) + dp = dp.mul(q) + + param_state = self.state[p] + if 'mu' not in param_state: + param_state['mu'] = torch.zeros_like(p) + mu = param_state['mu'] + mu.mul_(g['momentum']).add_(dp) + p.add_(mu, alpha=-g['lr']) \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/util/lr_decay.py b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/util/lr_decay.py new file mode 100644 index 0000000000000000000000000000000000000000..7fa11f1c581190f1a93c24c7dedde60804ab980b --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/util/lr_decay.py @@ -0,0 +1,76 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# -------------------------------------------------------- +# References: +# ELECTRA https://github.com/google-research/electra +# BEiT: https://github.com/microsoft/unilm/tree/master/beit +# -------------------------------------------------------- + +import json + + +def param_groups_lrd(model, weight_decay=0.05, no_weight_decay_list=[], layer_decay=.75): + """ + Parameter groups for layer-wise lr decay + Following BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L58 + """ + param_group_names = {} + param_groups = {} + + num_layers = len(model.blocks) + 1 + + layer_scales = list(layer_decay ** (num_layers - i) for i in range(num_layers + 1)) + + for n, p in model.named_parameters(): + if not p.requires_grad: + continue + + # no decay: all 1D parameters and model specific ones + if p.ndim == 1 or n in no_weight_decay_list: + g_decay = "no_decay" + this_decay = 0. 
+ else: + g_decay = "decay" + this_decay = weight_decay + + layer_id = get_layer_id_for_vit(n, num_layers) + group_name = "layer_%d_%s" % (layer_id, g_decay) + + if group_name not in param_group_names: + this_scale = layer_scales[layer_id] + + param_group_names[group_name] = { + "lr_scale": this_scale, + "weight_decay": this_decay, + "params": [], + } + param_groups[group_name] = { + "lr_scale": this_scale, + "weight_decay": this_decay, + "params": [], + } + + param_group_names[group_name]["params"].append(n) + param_groups[group_name]["params"].append(p) + + # print("parameter groups: \n%s" % json.dumps(param_group_names, indent=2)) + + return list(param_groups.values()) + + +def get_layer_id_for_vit(name, num_layers): + """ + Assign a parameter with its layer id + Following BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L33 + """ + if name in ['cls_token', 'pos_embed']: + return 0 + elif name.startswith('patch_embed'): + return 0 + elif name.startswith('blocks'): + return int(name.split('.')[1]) + 1 + else: + return num_layers \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/util/lr_sched.py b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/util/lr_sched.py new file mode 100644 index 0000000000000000000000000000000000000000..4cb682bebbce25ea1df70119928faa5fc9a6ab02 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/util/lr_sched.py @@ -0,0 +1,21 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import math + +def adjust_learning_rate(optimizer, epoch, args): + """Decay the learning rate with half-cycle cosine after warmup""" + if epoch < args.warmup_epochs: + lr = args.lr * epoch / args.warmup_epochs + else: + lr = args.min_lr + (args.lr - args.min_lr) * 0.5 * \ + (1. + math.cos(math.pi * (epoch - args.warmup_epochs) / (args.epochs - args.warmup_epochs))) + for param_group in optimizer.param_groups: + if "lr_scale" in param_group: + param_group["lr"] = lr * param_group["lr_scale"] + else: + param_group["lr"] = lr + return lr diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/util/misc.py b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/util/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..3bd53543176d31c408907e4eea6895a39231b0b1 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/util/misc.py @@ -0,0 +1,345 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# -------------------------------------------------------- +# References: +# DeiT: https://github.com/facebookresearch/deit +# BEiT: https://github.com/microsoft/unilm/tree/master/beit +# -------------------------------------------------------- + +import builtins +import datetime +import os +import time +from collections import defaultdict, deque +from pathlib import Path + +import torch +import torch.distributed as dist +from torch._six import inf + + +class SmoothedValue(object): + """Track a series of values and provide access to smoothed values over a + window or the global series average. 
+ """ + + def __init__(self, window_size=20, fmt=None): + if fmt is None: + fmt = "{median:.4f} ({global_avg:.4f})" + self.deque = deque(maxlen=window_size) + self.total = 0.0 + self.count = 0 + self.fmt = fmt + + def update(self, value, n=1): + self.deque.append(value) + self.count += n + self.total += value * n + + def synchronize_between_processes(self): + """ + Warning: does not synchronize the deque! + """ + if not is_dist_avail_and_initialized(): + return + t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') + dist.barrier() + dist.all_reduce(t) + t = t.tolist() + self.count = int(t[0]) + self.total = t[1] + + @property + def median(self): + d = torch.tensor(list(self.deque)) + return d.median().item() + + @property + def avg(self): + d = torch.tensor(list(self.deque), dtype=torch.float32) + return d.mean().item() + + @property + def global_avg(self): + return self.total / self.count + + @property + def max(self): + return max(self.deque) + + @property + def value(self): + return self.deque[-1] + + def __str__(self): + return self.fmt.format( + median=self.median, + avg=self.avg, + global_avg=self.global_avg, + max=self.max, + value=self.value) + + +class MetricLogger(object): + def __init__(self, delimiter="\t"): + self.meters = defaultdict(SmoothedValue) + self.delimiter = delimiter + + def update(self, **kwargs): + for k, v in kwargs.items(): + if v is None: + continue + if isinstance(v, torch.Tensor): + v = v.item() + assert isinstance(v, (float, int)) + self.meters[k].update(v) + + def __getattr__(self, attr): + if attr in self.meters: + return self.meters[attr] + if attr in self.__dict__: + return self.__dict__[attr] + raise AttributeError("'{}' object has no attribute '{}'".format( + type(self).__name__, attr)) + + def __str__(self): + loss_str = [] + for name, meter in self.meters.items(): + loss_str.append( + "{}: {}".format(name, str(meter)) + ) + return self.delimiter.join(loss_str) + + def synchronize_between_processes(self): + for meter in self.meters.values(): + meter.synchronize_between_processes() + + def add_meter(self, name, meter): + self.meters[name] = meter + + def log_every(self, iterable, print_freq, header=None): + i = 0 + if not header: + header = '' + start_time = time.time() + end = time.time() + iter_time = SmoothedValue(fmt='{avg:.4f}') + data_time = SmoothedValue(fmt='{avg:.4f}') + space_fmt = ':' + str(len(str(len(iterable)))) + 'd' + log_msg = [ + header, + '[{0' + space_fmt + '}/{1}]', + 'eta: {eta}', + '{meters}', + 'time: {time}', + 'data: {data}' + ] + if torch.cuda.is_available(): + log_msg.append('max mem: {memory:.0f}') + log_msg = self.delimiter.join(log_msg) + MB = 1024.0 * 1024.0 + for obj in iterable: + data_time.update(time.time() - end) + yield obj + iter_time.update(time.time() - end) + if i % print_freq == 0 or i == len(iterable) - 1: + eta_seconds = iter_time.global_avg * (len(iterable) - i) + eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) + if torch.cuda.is_available(): + print(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time), + memory=torch.cuda.max_memory_allocated() / MB)) + else: + print(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time))) + i += 1 + end = time.time() + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('{} Total time: {} ({:.4f} s / it)'.format( + header, total_time_str, total_time 
/ len(iterable))) + + +def setup_for_distributed(is_master): + """ + This function disables printing when not in master process + """ + builtin_print = builtins.print + + def print(*args, **kwargs): + force = kwargs.pop('force', False) + force = force or (get_world_size() > 8) + if is_master or force: + now = datetime.datetime.now().time() + builtin_print('[{}] '.format(now), end='') # print with time stamp + builtin_print(*args, **kwargs) + + builtins.print = print + + +def is_dist_avail_and_initialized(): + if not dist.is_available(): + return False + if not dist.is_initialized(): + return False + return True + + +def get_world_size(): + if not is_dist_avail_and_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank(): + if not is_dist_avail_and_initialized(): + return 0 + return dist.get_rank() + + +def is_main_process(): + return get_rank() == 0 + + +def save_on_master(*args, **kwargs): + if is_main_process(): + torch.save(*args, **kwargs) + + +def init_distributed_mode(args): + # # altered: disable DDP + # print('Not using distributed mode') + # setup_for_distributed(is_master=True) # hack + # args.distributed = False + # return + if args.dist_on_itp: + args.rank = int(os.environ['OMPI_COMM_WORLD_RANK']) + args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE']) + args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK']) + args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT']) + os.environ['LOCAL_RANK'] = str(args.gpu) + os.environ['RANK'] = str(args.rank) + os.environ['WORLD_SIZE'] = str(args.world_size) + # ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"] + elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: + args.rank = int(os.environ["RANK"]) + args.world_size = int(os.environ['WORLD_SIZE']) + args.gpu = int(os.environ['LOCAL_RANK']) + elif 'SLURM_PROCID' in os.environ: + args.rank = int(os.environ['SLURM_PROCID']) + args.gpu = args.rank % torch.cuda.device_count() + else: + print('Not using distributed mode') + setup_for_distributed(is_master=True) # hack + args.distributed = False + return + + args.distributed = True + + torch.cuda.set_device(args.gpu) + args.dist_backend = 'nccl' + print('| distributed init (rank {}): {}, gpu {}'.format( + args.rank, args.dist_url, args.gpu), flush=True) + torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, + world_size=args.world_size, rank=args.rank) + torch.distributed.barrier() + setup_for_distributed(args.rank == 0) + + +class NativeScalerWithGradNormCount: + state_dict_key = "amp_scaler" + + def __init__(self): + self._scaler = torch.cuda.amp.GradScaler() + + def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True): + self._scaler.scale(loss).backward(create_graph=create_graph) + if update_grad: + if clip_grad is not None: + assert parameters is not None + self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place + norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad) + else: + self._scaler.unscale_(optimizer) + norm = get_grad_norm_(parameters) + self._scaler.step(optimizer) + self._scaler.update() + else: + norm = None + return norm + + def state_dict(self): + return self._scaler.state_dict() + + def load_state_dict(self, state_dict): + self._scaler.load_state_dict(state_dict) + + +def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor: + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + 
parameters = [p for p in parameters if p.grad is not None] + norm_type = float(norm_type) + if len(parameters) == 0: + return torch.tensor(0.) + device = parameters[0].grad.device + if norm_type == inf: + total_norm = max(p.grad.detach().abs().max().to(device) for p in parameters) + else: + total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), norm_type) + return total_norm + + +def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler): + output_dir = Path(args.output_dir) + epoch_name = str(epoch) + if loss_scaler is not None: + checkpoint_paths = [output_dir / ('checkpoint-%s.pth' % epoch_name)] + for checkpoint_path in checkpoint_paths: + to_save = { + 'model': model_without_ddp.state_dict(), + 'optimizer': optimizer.state_dict(), + 'epoch': epoch, + 'scaler': loss_scaler.state_dict(), + 'args': args, + } + + save_on_master(to_save, checkpoint_path) + else: + client_state = {'epoch': epoch} + model.save_checkpoint(save_dir=args.output_dir, tag="checkpoint-%s" % epoch_name, client_state=client_state) + + +def load_model(args, model_without_ddp, optimizer, loss_scaler): + if args.resume: + if args.resume.startswith('https'): + checkpoint = torch.hub.load_state_dict_from_url( + args.resume, map_location='cpu', check_hash=True) + else: + checkpoint = torch.load(args.resume, map_location='cpu') + model_without_ddp.load_state_dict(checkpoint['model']) + print("Resume checkpoint %s" % args.resume) + if 'optimizer' in checkpoint and 'epoch' in checkpoint and not (hasattr(args, 'eval') and args.eval): + optimizer.load_state_dict(checkpoint['optimizer']) + args.start_epoch = checkpoint['epoch'] + 1 + if 'scaler' in checkpoint: + loss_scaler.load_state_dict(checkpoint['scaler']) + print("With optim & sched!") + + +def all_reduce_mean(x): + world_size = get_world_size() + if world_size > 1: + x_reduce = torch.tensor(x).cuda() + dist.all_reduce(x_reduce) + x_reduce /= world_size + return x_reduce.item() + else: + return x \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/gcmae/util/pos_embed.py b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/util/pos_embed.py new file mode 100644 index 0000000000000000000000000000000000000000..bcafafe0f75d9bbc16827d2432d84d8f8a1e2709 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/gcmae/util/pos_embed.py @@ -0,0 +1,96 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
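+#
+# Informal summary of the construction below (comment added for clarity;
+# not part of the original file). For embedding dimension D and a scalar
+# position pos,
+#   omega_k = 1 / 10000**(k / (D/2)),  k = 0 .. D/2 - 1
+#   emb(pos) = [sin(pos * omega), cos(pos * omega)]   # D values in total
+# The 2-D variant encodes the grid height with D/2 channels and the grid
+# width with the other D/2, then concatenates the two.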
+# -------------------------------------------------------- +# Position embedding utils +# -------------------------------------------------------- + +import numpy as np + +import torch + +# -------------------------------------------------------- +# 2D sine-cosine position embedding +# References: +# Transformer: https://github.com/tensorflow/models/blob/master/official/nlp/transformer/model_utils.py +# MoCo v3: https://github.com/facebookresearch/moco-v3 +# -------------------------------------------------------- +def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False): + """ + grid_size: int of the grid height and width + return: + pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token) + """ + grid_h = np.arange(grid_size, dtype=np.float32) + grid_w = np.arange(grid_size, dtype=np.float32) + grid = np.meshgrid(grid_w, grid_h) # here w goes first + grid = np.stack(grid, axis=0) + + grid = grid.reshape([2, 1, grid_size, grid_size]) + pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid) + if cls_token: + pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0) + return pos_embed + + +def get_2d_sincos_pos_embed_from_grid(embed_dim, grid): + assert embed_dim % 2 == 0 + + # use half of dimensions to encode grid_h + emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2) + emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2) + + emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D) + return emb + + +def get_1d_sincos_pos_embed_from_grid(embed_dim, pos): + """ + embed_dim: output dimension for each position + pos: a list of positions to be encoded: size (M,) + out: (M, D) + """ + assert embed_dim % 2 == 0 + omega = np.arange(embed_dim // 2, dtype=np.float32) + omega /= embed_dim / 2. + omega = 1. 
/ 10000**omega # (D/2,) + + pos = pos.reshape(-1) # (M,) + out = np.einsum('m,d->md', pos, omega) # (M, D/2), outer product + + emb_sin = np.sin(out) # (M, D/2) + emb_cos = np.cos(out) # (M, D/2) + + emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D) + return emb + + +# -------------------------------------------------------- +# Interpolate position embeddings for high-resolution +# References: +# DeiT: https://github.com/facebookresearch/deit +# -------------------------------------------------------- +def interpolate_pos_embed(model, checkpoint_model): + if 'pos_embed' in checkpoint_model: + pos_embed_checkpoint = checkpoint_model['pos_embed'] + embedding_size = pos_embed_checkpoint.shape[-1] + num_patches = model.patch_embed.num_patches + num_extra_tokens = model.pos_embed.shape[-2] - num_patches + # height (== width) for the checkpoint position embedding + orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5) + # height (== width) for the new position embedding + new_size = int(num_patches ** 0.5) + # class_token and dist_token are kept unchanged + if orig_size != new_size: + print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size)) + extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] + # only the position tokens are interpolated + pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] + pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2) + pos_tokens = torch.nn.functional.interpolate( + pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False) + pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) + new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) + checkpoint_model['pos_embed'] = new_pos_embed diff --git a/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/CODE_OF_CONDUCT.md b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000000000000000000000000000000..08b500a221857ec3f451338e80b4a9ab1173a1af --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/CODE_OF_CONDUCT.md @@ -0,0 +1,80 @@ +# Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to make participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies within all project spaces, and it also applies when +an individual is representing the project or its community in public spaces. +Examples of representing a project or community include using an official +project e-mail address, posting via an official social media account, or acting +as an appointed representative at an online or offline event. Representation of +a project may be further defined and clarified by project maintainers. + +This Code of Conduct also applies outside the project spaces when there is a +reasonable belief that an individual's behavior may have a negative impact on +the project or its community. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at . All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see
+https://www.contributor-covenant.org/faq
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/CONFIG.md b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/CONFIG.md
new file mode 100644
index 0000000000000000000000000000000000000000..ebd0dc7633bc5bbae4557bac0cbf467909d96aca
--- /dev/null
+++ b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/CONFIG.md
@@ -0,0 +1,210 @@
+## MoCo v3 Reference Setups and Models
+
+Here we document the reference commands for pre-training and evaluating various MoCo v3 models.
+
+### ResNet-50 models
+
+With batch 4096, the training of all ResNet-50 models can fit into 2 nodes with a total of 16 Volta 32G GPUs.
+
+#### ResNet-50, 100-epoch pre-training
+
+On the first node, run:
+```
+python main_moco.py \
+  --moco-m-cos --crop-min=.2 \
+  --dist-url 'tcp://[your first node address]:[specified port]' \
+  --multiprocessing-distributed --world-size 2 --rank 0 \
+  [your imagenet-folder with train and val folders]
+```
+On the second node, run the same command with `--rank 1`.
+
+#### ResNet-50, 300-epoch pre-training
+
+On the first node, run:
+```
+python main_moco.py \
+  --lr=.3 --epochs=300 \
+  --moco-m-cos --crop-min=.2 \
+  --dist-url 'tcp://[your first node address]:[specified port]' \
+  --multiprocessing-distributed --world-size 2 --rank 0 \
+  [your imagenet-folder with train and val folders]
+```
+On the second node, run the same command with `--rank 1`.
+
+#### ResNet-50, 1000-epoch pre-training
+
+On the first node, run:
+```
+python main_moco.py \
+  --lr=.3 --wd=1.5e-6 --epochs=1000 \
+  --moco-m=0.996 --moco-m-cos --crop-min=.2 \
+  --dist-url 'tcp://[your first node address]:[specified port]' \
+  --multiprocessing-distributed --world-size 2 --rank 0 \
+  [your imagenet-folder with train and val folders]
+```
+On the second node, run the same command with `--rank 1`.
+
+#### ResNet-50, linear classification
+
+Run on single node:
+```
+python main_lincls.py \
+  --dist-url 'tcp://localhost:10001' \
+  --multiprocessing-distributed --world-size 1 --rank 0 \
+  --pretrained [your checkpoint path]/[your checkpoint file].pth.tar \
+  [your imagenet-folder with train and val folders]
+```
+
+Below are our pre-trained ResNet-50 models and logs.
+
+| pretrain epochs | linear acc | pretrain files | linear files |
+| :---: | :---: | :---: | :---: |
+| 100 | 68.9 | chpt | chpt / log |
+| 300 | 72.8 | chpt | chpt / log |
+| 1000 | 74.6 | chpt | chpt / log |
+
+
+### ViT Models
+
+All ViT models are pre-trained for 300 epochs with AdamW.
+
+#### ViT-Small, 1-node (8-GPU), 1024-batch pre-training
+
+This setup fits into a single node of 8 Volta 32G GPUs, for ease of debugging.
+```
+python main_moco.py \
+  -a vit_small -b 1024 \
+  --optimizer=adamw --lr=1.5e-4 --weight-decay=.1 \
+  --epochs=300 --warmup-epochs=40 \
+  --stop-grad-conv1 --moco-m-cos --moco-t=.2 \
+  --dist-url 'tcp://localhost:10001' \
+  --multiprocessing-distributed --world-size 1 --rank 0 \
+  [your imagenet-folder with train and val folders]
+```
+
+#### ViT-Small, 4-node (32-GPU) pre-training
+
+On the first node, run:
+```
+python main_moco.py \
+  -a vit_small \
+  --optimizer=adamw --lr=1.5e-4 --weight-decay=.1 \
+  --epochs=300 --warmup-epochs=40 \
+  --stop-grad-conv1 --moco-m-cos --moco-t=.2 \
+  --dist-url 'tcp://[your first node address]:[specified port]' \
+  --multiprocessing-distributed --world-size 4 --rank 0 \
+  [your imagenet-folder with train and val folders]
+```
+On other nodes, run the same command with `--rank 1`, ..., `--rank 3` respectively.
+
+#### ViT-Small, linear classification
+
+Run on single node:
+```
+python main_lincls.py \
+  -a vit_small --lr=3 \
+  --dist-url 'tcp://localhost:10001' \
+  --multiprocessing-distributed --world-size 1 --rank 0 \
+  --pretrained [your checkpoint path]/[your checkpoint file].pth.tar \
+  [your imagenet-folder with train and val folders]
+```
+
+#### ViT-Base, 8-node (64-GPU) pre-training
+
+```
+python main_moco.py \
+  -a vit_base \
+  --optimizer=adamw --lr=1.5e-4 --weight-decay=.1 \
+  --epochs=300 --warmup-epochs=40 \
+  --stop-grad-conv1 --moco-m-cos --moco-t=.2 \
+  --dist-url 'tcp://[your first node address]:[specified port]' \
+  --multiprocessing-distributed --world-size 8 --rank 0 \
+  [your imagenet-folder with train and val folders]
+```
+On other nodes, run the same command with `--rank 1`, ..., `--rank 7` respectively.
+
+#### ViT-Base, linear classification
+
+Run on single node:
+```
+python main_lincls.py \
+  -a vit_base --lr=3 \
+  --dist-url 'tcp://localhost:10001' \
+  --multiprocessing-distributed --world-size 1 --rank 0 \
+  --pretrained [your checkpoint path]/[your checkpoint file].pth.tar \
+  [your imagenet-folder with train and val folders]
+```
+
+
+Below are our pre-trained ViT models and logs (batch 4096).
+
+| model | pretrain epochs | linear acc | pretrain files | linear files |
+| :---: | :---: | :---: | :---: | :---: |
+| ViT-Small | 300 | 73.2 | chpt | chpt / log |
+| ViT-Base | 300 | 76.7 | chpt | chpt / log |
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/CONTRIBUTING.md b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/CONTRIBUTING.md
new file mode 100644
index 0000000000000000000000000000000000000000..dc46d7ca4fdeb197e02755f98f986719ac084381
--- /dev/null
+++ b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# Contributing to moco-v3
+We want to make contributing to this project as easy and transparent as
+possible.
+
+## Pull Requests
+We actively welcome your pull requests.
+
+1. Fork the repo and create your branch from `master`.
+2. If you've added code that should be tested, add tests.
+3. If you've changed APIs, update the documentation.
+4. Ensure the test suite passes.
+5. Make sure your code lints.
+6. If you haven't already, complete the Contributor License Agreement ("CLA").
+
+## Contributor License Agreement ("CLA")
+In order to accept your pull request, we need you to submit a CLA. You only need
+to do this once to work on any of Facebook's open source projects.
+
+Complete your CLA here: 
+
+## Issues
+We use GitHub issues to track public bugs. Please ensure your description is
+clear and has sufficient instructions to be able to reproduce the issue.
+
+Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe
+disclosure of security bugs. In those cases, please go through the process
+outlined on that page and do not file a public issue.
+
+## License
+By contributing to moco-v3, you agree that your contributions will be licensed
+under the LICENSE file in the root directory of this source tree.
\ No newline at end of file
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/LICENSE b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..105a4fb33f75de2fc37c4bd73b7952e19602b589
--- /dev/null
+++ b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/LICENSE
@@ -0,0 +1,399 @@
+Attribution-NonCommercial 4.0 International
+
+=======================================================================
+
+Creative Commons Corporation ("Creative Commons") is not a law firm and
+does not provide legal services or legal advice. Distribution of
+Creative Commons public licenses does not create a lawyer-client or
+other relationship. Creative Commons makes its licenses and related
+information available on an "as-is" basis. Creative Commons gives no
+warranties regarding its licenses, any material licensed under their
+terms and conditions, or any related information. Creative Commons
+disclaims all liability for damages resulting from their use to the
+fullest extent possible.
+
+Using Creative Commons Public Licenses
+
+Creative Commons public licenses provide a standard set of terms and
+conditions that creators and other rights holders may use to share
+original works of authorship and other material subject to copyright
+and certain other rights specified in the public license below. The
+following considerations are for informational purposes only, are not
+exhaustive, and do not form part of our licenses.
+
+     Considerations for licensors: Our public licenses are
+     intended for use by those authorized to give the public
+     permission to use material in ways otherwise restricted by
+     copyright and certain other rights. Our licenses are
+     irrevocable. Licensors should read and understand the terms
+     and conditions of the license they choose before applying it.
+ Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-NonCommercial 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-NonCommercial 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + d. 
Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + e. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + f. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + g. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + h. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + i. NonCommercial means not primarily intended for or directed towards + commercial advantage or monetary compensation. For purposes of + this Public License, the exchange of the Licensed Material for + other material subject to Copyright and Similar Rights by digital + file-sharing or similar means is NonCommercial provided there is + no payment of monetary compensation in connection with the + exchange. + + j. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + k. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + l. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part, for NonCommercial purposes only; and + + b. produce, reproduce, and Share Adapted Material for + NonCommercial purposes only. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. 
The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties, including when + the Licensed Material is used other than for NonCommercial + purposes. + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. 
For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + 4. If You Share Adapted Material You produce, the Adapter's + License You apply must not prevent recipients of the Adapted + Material from complying with this Public License. + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database for NonCommercial purposes + only; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material; and + + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. 
+ + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + +======================================================================= + +Creative Commons is not a party to its public +licenses. Notwithstanding, Creative Commons may elect to apply one of +its public licenses to material it publishes and in those instances +will be considered the “Licensor.” The text of the Creative Commons +public licenses is dedicated to the public domain under the CC0 Public +Domain Dedication. Except for the limited purpose of indicating that +material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the +public licenses. + +Creative Commons may be contacted at creativecommons.org. 
\ No newline at end of file
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/README.md b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..af75ff4c58def55959be476e8928e420b08004c2
--- /dev/null
+++ b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/README.md
@@ -0,0 +1,15 @@
+## MoCo v3 for Self-supervised ResNet and ViT
+
+The original repo of MoCo-v3 can be found [here](https://github.com/facebookresearch/moco-v3).
+
+Pip requirements: timm == 0.4.9, PyTorch == 1.9.0, Torchvision == 0.10.0, Cuda == 10.2, Numpy == 1.19
+
+A typical command:
+```console
+python main_moco.py \
+  -a vit_base -b 512 \
+  --optimizer=adamw --lr=1.5e-4 --weight-decay=.1 \
+  --epochs=100 --warmup-epochs=20 \
+  --stop-grad-conv1 --moco-m-cos --moco-t=.2 --dist-url 'tcp://localhost:10001' \
+  --multiprocessing-distributed --world-size 1 --rank 0 --basic_state_dict the/path/of/CPIA
+```
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/__pycache__/vits.cpython-39.pyc b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/__pycache__/vits.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..111dc8df11051824a6f387e7d710330d52dce6e8
Binary files /dev/null and b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/__pycache__/vits.cpython-39.pyc differ
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/convert_to_deit.py b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/convert_to_deit.py
new file mode 100644
index 0000000000000000000000000000000000000000..b2a12f0e90e61180131a8a907dd6192f4d321c8b
--- /dev/null
+++ b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/convert_to_deit.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
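+#
+# Example invocation (placeholder paths in the style of the repo docs, added
+# for clarity; not part of the original file):
+#   python convert_to_deit.py \
+#       --input [your checkpoint path]/[your checkpoint file].pth.tar \
+#       --output [your output path]/deit_checkpoint.pth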
+
+import argparse
+import os
+import torch
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='Convert MoCo Pre-Trained Model to DEiT')
+    parser.add_argument('--input', default='', type=str, metavar='PATH', required=True,
+                        help='path to moco pre-trained checkpoint')
+    parser.add_argument('--output', default='', type=str, metavar='PATH', required=True,
+                        help='path to output checkpoint in DEiT format')
+    args = parser.parse_args()
+    print(args)
+
+    # load input
+    checkpoint = torch.load(args.input, map_location="cpu")
+    state_dict = checkpoint['state_dict']
+    for k in list(state_dict.keys()):
+        # retain only base_encoder up to before the embedding layer
+        if k.startswith('module.base_encoder') and not k.startswith('module.base_encoder.head'):
+            # remove prefix
+            state_dict[k[len("module.base_encoder."):]] = state_dict[k]
+        # delete renamed or unused k
+        del state_dict[k]
+
+    # make output directory if necessary
+    output_dir = os.path.dirname(args.output)
+    if not os.path.isdir(output_dir):
+        os.makedirs(output_dir)
+    # save to output
+    torch.save({'model': state_dict}, args.output)
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/main_lincls.py b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/main_lincls.py
new file mode 100644
index 0000000000000000000000000000000000000000..807f416c1f29e7cef81f62add4f92b719850fdf3
--- /dev/null
+++ b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/main_lincls.py
@@ -0,0 +1,524 @@
+#!/usr/bin/env python
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import argparse
+import builtins
+import math
+import os
+import random
+import shutil
+import time
+import warnings
+
+import torch
+import torch.nn as nn
+import torch.nn.parallel
+import torch.backends.cudnn as cudnn
+import torch.distributed as dist
+import torch.optim
+import torch.multiprocessing as mp
+import torch.utils.data
+import torch.utils.data.distributed
+import torchvision.transforms as transforms
+import torchvision.datasets as datasets
+import torchvision.models as torchvision_models
+
+import vits
+
+torchvision_model_names = sorted(name for name in torchvision_models.__dict__
+    if name.islower() and not name.startswith("__")
+    and callable(torchvision_models.__dict__[name]))
+
+model_names = ['vit_small', 'vit_base', 'vit_conv_small', 'vit_conv_base'] + torchvision_model_names
+
+parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
+parser.add_argument('--data', default='/Users/munros/ROSE/MARS_MIL',
+                    help='path to dataset')
+parser.add_argument('-a', '--arch', metavar='ARCH', default='vit_base',
+                    choices=model_names,
+                    help='model architecture: ' +
+                        ' | '.join(model_names) +
+                        ' (default: vit_base)')
+parser.add_argument('-j', '--workers', default=0, type=int, metavar='N',
+                    help='number of data loading workers (default: 0)')
+parser.add_argument('--epochs', default=5, type=int, metavar='N',
+                    help='number of total epochs to run')
+parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
+                    help='manual epoch number (useful on restarts)')
+parser.add_argument('-b', '--batch-size', default=4, type=int,
+                    metavar='N',
+                    help='mini-batch size (default: 4), this is the total '
+                         'batch size of all GPUs on all nodes when '
+                         'using Data Parallel or Distributed Data Parallel')
+parser.add_argument('--lr', '--learning-rate', default=0.1,
type=float, + metavar='LR', help='initial (base) learning rate', dest='lr') +parser.add_argument('--momentum', default=0.9, type=float, metavar='M', + help='momentum') +parser.add_argument('--wd', '--weight-decay', default=0., type=float, + metavar='W', help='weight decay (default: 0.)', + dest='weight_decay') +parser.add_argument('-p', '--print-freq', default=10, type=int, + metavar='N', help='print frequency (default: 10)') +parser.add_argument('--resume', default='', type=str, metavar='PATH', + help='path to latest checkpoint (default: none)') +parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', + help='evaluate model on validation set') +parser.add_argument('--world-size', default=-1, type=int, + help='number of nodes for distributed training') +parser.add_argument('--rank', default=-1, type=int, + help='node rank for distributed training') +parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str, + help='url used to set up distributed training') +parser.add_argument('--dist-backend', default='nccl', type=str, + help='distributed backend') +parser.add_argument('--seed', default=None, type=int, + help='seed for initializing training. ') +parser.add_argument('--gpu', default=None, type=int, + help='GPU id to use.') +parser.add_argument('--multiprocessing-distributed', action='store_true', + help='Use multi-processing distributed training to launch ' + 'N processes per node, which has N GPUs. This is the ' + 'fastest way to use PyTorch for either single node or ' + 'multi node data parallel training') + +# additional configs: +parser.add_argument('--pretrained', default='/Users/munros/Desktop/moco_checkpoint_0000.pth.tar', type=str, + help='path to moco pretrained checkpoint') + +best_acc1 = 0 + + +def main(): + args = parser.parse_args() + + if args.seed is not None: + random.seed(args.seed) + torch.manual_seed(args.seed) + cudnn.deterministic = True + warnings.warn('You have chosen to seed training. ' + 'This will turn on the CUDNN deterministic setting, ' + 'which can slow down your training considerably! ' + 'You may see unexpected behavior when restarting ' + 'from checkpoints.') + + if args.gpu is not None: + warnings.warn('You have chosen a specific GPU. 
This will completely ' + 'disable data parallelism.') + + if args.dist_url == "env://" and args.world_size == -1: + args.world_size = int(os.environ["WORLD_SIZE"]) + + args.distributed = args.world_size > 1 or args.multiprocessing_distributed + + ngpus_per_node = torch.cuda.device_count() + if args.multiprocessing_distributed: + # Since we have ngpus_per_node processes per node, the total world_size + # needs to be adjusted accordingly + args.world_size = ngpus_per_node * args.world_size + # Use torch.multiprocessing.spawn to launch distributed processes: the + # main_worker process function + mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args)) + else: + # Simply call main_worker function + main_worker(args.gpu, ngpus_per_node, args) + + +def main_worker(gpu, ngpus_per_node, args): + global best_acc1 + args.gpu = gpu + + # suppress printing if not master + if args.multiprocessing_distributed and args.gpu != 0: + def print_pass(*args): + pass + builtins.print = print_pass + + if args.gpu is not None: + print("Use GPU: {} for training".format(args.gpu)) + + if args.distributed: + if args.dist_url == "env://" and args.rank == -1: + args.rank = int(os.environ["RANK"]) + if args.multiprocessing_distributed: + # For multiprocessing distributed training, rank needs to be the + # global rank among all the processes + args.rank = args.rank * ngpus_per_node + gpu + dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, + world_size=args.world_size, rank=args.rank) + torch.distributed.barrier() + # create model + print("=> creating model '{}'".format(args.arch)) + if args.arch.startswith('vit'): + model = vits.__dict__[args.arch]() + linear_keyword = 'head' + else: + model = torchvision_models.__dict__[args.arch]() + linear_keyword = 'fc' + + # freeze all layers but the last fc + for name, param in model.named_parameters(): + if name not in ['%s.weight' % linear_keyword, '%s.bias' % linear_keyword]: + param.requires_grad = False + # init the fc layer + getattr(model, linear_keyword).weight.data.normal_(mean=0.0, std=0.01) + getattr(model, linear_keyword).bias.data.zero_() + + # load from pre-trained, before DistributedDataParallel constructor + if args.pretrained: + if os.path.isfile(args.pretrained): + print("=> loading checkpoint '{}'".format(args.pretrained)) + checkpoint = torch.load(args.pretrained, map_location="cpu") + + # rename moco pre-trained keys + state_dict = checkpoint['state_dict'] + for k in list(state_dict.keys()): + # retain only base_encoder up to before the embedding layer + if k.startswith('module.base_encoder') and not k.startswith('module.base_encoder.%s' % linear_keyword): + # remove prefix + state_dict[k[len("module.base_encoder."):]] = state_dict[k] + # delete renamed or unused k + del state_dict[k] + + args.start_epoch = 0 + msg = model.load_state_dict(state_dict, strict=False) + assert set(msg.missing_keys) == {"%s.weight" % linear_keyword, "%s.bias" % linear_keyword} + + print("=> loaded pre-trained model '{}'".format(args.pretrained)) + else: + print("=> no checkpoint found at '{}'".format(args.pretrained)) + + # infer learning rate before changing batch size + init_lr = args.lr * args.batch_size / 256 + + if not torch.cuda.is_available(): + print('using CPU, this will be slow') + elif args.distributed: + # For multiprocessing distributed, DistributedDataParallel constructor + # should always set the single device scope, otherwise, + # DistributedDataParallel will use all available devices. 
+ if args.gpu is not None: + torch.cuda.set_device(args.gpu) + model.cuda(args.gpu) + # When using a single GPU per process and per + # DistributedDataParallel, we need to divide the batch size + # ourselves based on the total number of GPUs we have + args.batch_size = int(args.batch_size / args.world_size) + args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node) + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) + else: + model.cuda() + # DistributedDataParallel will divide and allocate batch_size to all + # available GPUs if device_ids are not set + model = torch.nn.parallel.DistributedDataParallel(model) + elif args.gpu is not None: + torch.cuda.set_device(args.gpu) + model = model.cuda(args.gpu) + else: + # DataParallel will divide and allocate batch_size to all available GPUs + if args.arch.startswith('alexnet') or args.arch.startswith('vgg'): + model.features = torch.nn.DataParallel(model.features) + model.cuda() + else: + model = torch.nn.DataParallel(model).cuda() + + # define loss function (criterion) and optimizer + criterion = nn.CrossEntropyLoss().cuda(args.gpu) + + # optimize only the linear classifier + parameters = list(filter(lambda p: p.requires_grad, model.parameters())) + assert len(parameters) == 2 # weight, bias + + optimizer = torch.optim.SGD(parameters, init_lr, + momentum=args.momentum, + weight_decay=args.weight_decay) + + # optionally resume from a checkpoint + if args.resume: + if os.path.isfile(args.resume): + print("=> loading checkpoint '{}'".format(args.resume)) + if args.gpu is None: + checkpoint = torch.load(args.resume) + else: + # Map model to be loaded to specified single gpu. + loc = 'cuda:{}'.format(args.gpu) + checkpoint = torch.load(args.resume, map_location=loc) + args.start_epoch = checkpoint['epoch'] + best_acc1 = checkpoint['best_acc1'] + if args.gpu is not None: + # best_acc1 may be from a checkpoint from a different GPU + best_acc1 = best_acc1.to(args.gpu) + model.load_state_dict(checkpoint['state_dict']) + optimizer.load_state_dict(checkpoint['optimizer']) + print("=> loaded checkpoint '{}' (epoch {})" + .format(args.resume, checkpoint['epoch'])) + else: + print("=> no checkpoint found at '{}'".format(args.resume)) + + cudnn.benchmark = True + + # Data loading code + traindir = os.path.join(args.data, 'train') + valdir = os.path.join(args.data, 'val') + normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]) + + train_dataset = datasets.ImageFolder( + traindir, + transforms.Compose([ + transforms.RandomResizedCrop(224), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + normalize, + ])) + + if args.distributed: + train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) + else: + train_sampler = None + + train_loader = torch.utils.data.DataLoader( + train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), + num_workers=args.workers, pin_memory=True, sampler=train_sampler) + + val_loader = torch.utils.data.DataLoader( + datasets.ImageFolder(valdir, transforms.Compose([ + transforms.Resize(256), + transforms.CenterCrop(224), + transforms.ToTensor(), + normalize, + ])), + batch_size=256, shuffle=False, + num_workers=args.workers, pin_memory=True) + + if args.evaluate: + validate(val_loader, model, criterion, args) + return + + for epoch in range(args.start_epoch, args.epochs): + if args.distributed: + train_sampler.set_epoch(epoch) + adjust_learning_rate(optimizer, init_lr, epoch, args) + + # train for one epoch 
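+        # (adjust_learning_rate above applies a half-cycle cosine decay to
+        #  init_lr, the base lr that was pre-scaled by batch_size / 256)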
+ train(train_loader, model, criterion, optimizer, epoch, args) + + # evaluate on validation set + acc1 = validate(val_loader, model, criterion, args) + + # remember best acc@1 and save checkpoint + is_best = acc1 > best_acc1 + best_acc1 = max(acc1, best_acc1) + + if not args.multiprocessing_distributed or (args.multiprocessing_distributed + and args.rank == 0): # only the first GPU saves checkpoint + save_checkpoint({ + 'epoch': epoch + 1, + 'arch': args.arch, + 'state_dict': model.state_dict(), + 'best_acc1': best_acc1, + 'optimizer' : optimizer.state_dict(), + }, is_best) + if epoch == args.start_epoch: + sanity_check(model.state_dict(), args.pretrained, linear_keyword) + + +def train(train_loader, model, criterion, optimizer, epoch, args): + batch_time = AverageMeter('Time', ':6.3f') + data_time = AverageMeter('Data', ':6.3f') + losses = AverageMeter('Loss', ':.4e') + top1 = AverageMeter('Acc@1', ':6.2f') + top5 = AverageMeter('Acc@5', ':6.2f') + progress = ProgressMeter( + len(train_loader), + [batch_time, data_time, losses, top1, top5], + prefix="Epoch: [{}]".format(epoch)) + + """ + Switch to eval mode: + Under the protocol of linear classification on frozen features/models, + it is not legitimate to change any part of the pre-trained model. + BatchNorm in train mode may revise running mean/std (even if it receives + no gradient), which are part of the model parameters too. + """ + model.eval() + + end = time.time() + for i, (images, target) in enumerate(train_loader): + # measure data loading time + data_time.update(time.time() - end) + + if args.gpu is not None: + images = images.cuda(args.gpu, non_blocking=True) + if torch.cuda.is_available(): + target = target.cuda(args.gpu, non_blocking=True) + + # compute output + output = model(images) + loss = criterion(output, target) + + # measure accuracy and record loss + acc1, acc5 = accuracy(output, target, topk=(1, 5)) + losses.update(loss.item(), images.size(0)) + top1.update(acc1[0], images.size(0)) + top5.update(acc5[0], images.size(0)) + + # compute gradient and do SGD step + optimizer.zero_grad() + loss.backward() + optimizer.step() + + # measure elapsed time + batch_time.update(time.time() - end) + end = time.time() + + if i % args.print_freq == 0: + progress.display(i) + + +def validate(val_loader, model, criterion, args): + batch_time = AverageMeter('Time', ':6.3f') + losses = AverageMeter('Loss', ':.4e') + top1 = AverageMeter('Acc@1', ':6.2f') + top5 = AverageMeter('Acc@5', ':6.2f') + progress = ProgressMeter( + len(val_loader), + [batch_time, losses, top1, top5], + prefix='Test: ') + + # switch to evaluate mode + model.eval() + + with torch.no_grad(): + end = time.time() + for i, (images, target) in enumerate(val_loader): + if args.gpu is not None: + images = images.cuda(args.gpu, non_blocking=True) + if torch.cuda.is_available(): + target = target.cuda(args.gpu, non_blocking=True) + + # compute output + output = model(images) + loss = criterion(output, target) + + # measure accuracy and record loss + acc1, acc5 = accuracy(output, target, topk=(1, 5)) + losses.update(loss.item(), images.size(0)) + top1.update(acc1[0], images.size(0)) + top5.update(acc5[0], images.size(0)) + + # measure elapsed time + batch_time.update(time.time() - end) + end = time.time() + + if i % args.print_freq == 0: + progress.display(i) + + # TODO: this should also be done with the ProgressMeter + print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}' + .format(top1=top1, top5=top5)) + + return top1.avg + + +def save_checkpoint(state, is_best, 
filename='checkpoint.pth.tar'): + torch.save(state, filename) + if is_best: + shutil.copyfile(filename, 'model_best.pth.tar') + + +def sanity_check(state_dict, pretrained_weights, linear_keyword): + """ + Linear classifier should not change any weights other than the linear layer. + This sanity check asserts nothing wrong happens (e.g., BN stats updated). + """ + print("=> loading '{}' for sanity check".format(pretrained_weights)) + checkpoint = torch.load(pretrained_weights, map_location="cpu") + state_dict_pre = checkpoint['state_dict'] + + for k in list(state_dict.keys()): + # only ignore linear layer + if '%s.weight' % linear_keyword in k or '%s.bias' % linear_keyword in k: + continue + + # name in pretrained model + k_pre = 'module.base_encoder.' + k[len('module.'):] \ + if k.startswith('module.') else 'module.base_encoder.' + k + + assert ((state_dict[k].cpu() == state_dict_pre[k_pre]).all()), \ + '{} is changed in linear classifier training.'.format(k) + + print("=> sanity check passed.") + + +class AverageMeter(object): + """Computes and stores the average and current value""" + def __init__(self, name, fmt=':f'): + self.name = name + self.fmt = fmt + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + def __str__(self): + fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' + return fmtstr.format(**self.__dict__) + + +class ProgressMeter(object): + def __init__(self, num_batches, meters, prefix=""): + self.batch_fmtstr = self._get_batch_fmtstr(num_batches) + self.meters = meters + self.prefix = prefix + + def display(self, batch): + entries = [self.prefix + self.batch_fmtstr.format(batch)] + entries += [str(meter) for meter in self.meters] + print('\t'.join(entries)) + + def _get_batch_fmtstr(self, num_batches): + num_digits = len(str(num_batches // 1)) + fmt = '{:' + str(num_digits) + 'd}' + return '[' + fmt + '/' + fmt.format(num_batches) + ']' + + +def adjust_learning_rate(optimizer, init_lr, epoch, args): + """Decay the learning rate based on schedule""" + cur_lr = init_lr * 0.5 * (1. + math.cos(math.pi * epoch / args.epochs)) + for param_group in optimizer.param_groups: + param_group['lr'] = cur_lr + + +def accuracy(output, target, topk=(1,)): + """Computes the accuracy over the k top predictions for the specified values of k""" + with torch.no_grad(): + maxk = max(topk) + batch_size = target.size(0) + + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + res = [] + for k in topk: + correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True) + res.append(correct_k.mul_(100.0 / batch_size)) + return res + + +if __name__ == '__main__': + main() diff --git a/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/main_moco.py b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/main_moco.py new file mode 100644 index 0000000000000000000000000000000000000000..9fd74dd55e17734211b2e5d8f10a208e95ce33ad --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/main_moco.py @@ -0,0 +1,736 @@ +#!/usr/bin/env python + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
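+# MoCo v3 pre-training entry point; this PuzzleTuning variant can additionally
+# warm-start the ViT backbone from args.basic_state_dict (see main_worker below).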
+
+import argparse
+import builtins
+import math
+import os
+import random
+import shutil
+import time
+import warnings
+from functools import partial
+
+import torch
+import torch.nn as nn
+import torch.nn.parallel
+import torch.backends.cudnn as cudnn
+import torch.distributed as dist
+import torch.optim
+import torch.multiprocessing as mp
+import torch.utils.data
+import torch.utils.data.distributed
+import torchvision.transforms as transforms
+import torchvision.datasets as datasets
+import torchvision.models as torchvision_models
+from torch.utils.tensorboard import SummaryWriter
+
+import moco.builder
+import moco.loader
+import moco.optimizer
+
+import vits
+from misc import NativeScalerWithGradNormCount as NativeScaler
+
+torchvision_model_names = sorted(name for name in torchvision_models.__dict__
+                                 if name.islower() and not name.startswith("__")
+                                 and callable(torchvision_models.__dict__[name]))
+
+model_names = ['vit_small', 'vit_base', 'vit_conv_small', 'vit_conv_base'] + torchvision_model_names
+
+parser = argparse.ArgumentParser(description='MoCo ImageNet Pre-Training')
+parser.add_argument('data', nargs='?', default='/root/autodl-tmp/All',
+                    help='path to dataset')  # nargs='?' so the default path is actually used when none is given
+parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50',
+                    choices=model_names,
+                    help='model architecture: ' +
+                         ' | '.join(model_names) +
+                         ' (default: resnet50)')
+parser.add_argument('-j', '--workers', default=32, type=int, metavar='N',
+                    help='number of data loading workers (default: 32)')
+parser.add_argument('--epochs', default=100, type=int, metavar='N',
+                    help='number of total epochs to run')
+parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
+                    help='manual epoch number (useful on restarts)')
+parser.add_argument('-b', '--batch-size', default=1024, type=int,
+                    metavar='N',
+                    help='mini-batch size (default: 1024), this is the total '
+                         'batch size of all GPUs on all nodes when '
+                         'using Data Parallel or Distributed Data Parallel')
+parser.add_argument('--lr', '--learning-rate', default=1.5e-4, type=float,
+                    metavar='LR', help='initial (base) learning rate', dest='lr')
+parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
+                    help='momentum')
+parser.add_argument('--wd', '--weight-decay', default=1e-6, type=float,
+                    metavar='W', help='weight decay (default: 1e-6)',
+                    dest='weight_decay')
+parser.add_argument('-p', '--print-freq', default=10, type=int,
+                    metavar='N', help='print frequency (default: 10)')
+parser.add_argument('--resume', default='', type=str, metavar='PATH',
+                    help='path to latest checkpoint (default: none)')
+parser.add_argument('--world-size', default=-1, type=int,
+                    help='number of nodes for distributed training')
+parser.add_argument('--rank', default=-1, type=int,
+                    help='node rank for distributed training')
+parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
+                    help='url used to set up distributed training')
+parser.add_argument('--dist-backend', default='nccl', type=str,  # nccl is the fastest and recommended backend on GPU
+                    help='distributed backend')
+parser.add_argument('--seed', default=None, type=int,
+                    help='seed for initializing training. ')
+parser.add_argument('--gpu', default=None, type=int,
+                    help='GPU id to use.')
+parser.add_argument('--multiprocessing-distributed', action='store_true',
+                    help='Use multi-processing distributed training to launch '
+                         'N processes per node, which has N GPUs. This is the '
+                         'fastest way to use PyTorch for either single node or '
+                         'multi node data parallel training')
+parser.add_argument('--saveckp_freq', default=50, type=int, help='Save checkpoint every x epochs.')
+
+# moco specific configs:
+parser.add_argument('--moco-dim', default=256, type=int,
+                    help='feature dimension (default: 256)')
+parser.add_argument('--moco-mlp-dim', default=4096, type=int,
+                    help='hidden dimension in MLPs (default: 4096)')
+parser.add_argument('--moco-m', default=0.99, type=float,
+                    help='moco momentum of updating momentum encoder (default: 0.99)')
+parser.add_argument('--moco-m-cos', action='store_true',
+                    help='gradually increase moco momentum to 1 with a '
+                         'half-cycle cosine schedule')
+parser.add_argument('--moco-t', default=1.0, type=float,
+                    help='softmax temperature (default: 1.0)')
+
+# vit specific configs:
+parser.add_argument('--stop-grad-conv1', action='store_true',
+                    help='stop-grad after first conv, or patch embedding')
+
+# other upgrades
+parser.add_argument('--optimizer', default='lars', type=str,
+                    choices=['lars', 'adamw'],
+                    help='optimizer used (default: lars)')
+parser.add_argument('--warmup-epochs', default=20, type=int, metavar='N',
+                    help='number of warmup epochs')
+parser.add_argument('--crop-min', default=0.08, type=float,
+                    help='minimum scale for random cropping (default: 0.08)')
+parser.add_argument('--basic_state_dict', default='/root/autodl-tmp/ViT_b16_224_Imagenet.pth', type=str,
+                    help='Load in pretrained or un-pretrained model pth')
+
+
+def main():
+    # parse the arguments
+    args = parser.parse_args()
+    # seed defaults to None; this is the standard way to set it
+    if args.seed is not None:
+        random.seed(args.seed)
+        torch.manual_seed(args.seed)
+        cudnn.deterministic = True
+        warnings.warn('You have chosen to seed training. '
+                      'This will turn on the CUDNN deterministic setting, '
+                      'which can slow down your training considerably! '
+                      'You may see unexpected behavior when restarting '
+                      'from checkpoints.')
+        # strictly speaking, cudnn.benchmark = False should also be set here
+
+    # gpu defaults to None; the authors ran this pre-training on hundreds of GPUs/TPUs,
+    # so pinning a single GPU is mainly useful for debugging
+    if args.gpu is not None:
+        warnings.warn('You have chosen a specific GPU. This will completely '
+                      'disable data parallelism.')
+
+    # dist_url defaults to 'tcp://224.66.41.62:23456', presumably the first node of the authors' cluster
+    # world_size defaults to -1; WORLD_SIZE is produced by torch.distributed.launch
+    # as nproc_per_node * node (the number of servers/nodes)
+    if args.dist_url == "env://" and args.world_size == -1:
+        args.world_size = int(os.environ["WORLD_SIZE"])
+
+    # multiprocessing_distributed defaults to False;
+    # it must be set to True whenever the program should run with multiple processes
+    args.distributed = args.world_size > 1 or args.multiprocessing_distributed
+
+    # number of visible GPUs
+    ngpus_per_node = torch.cuda.device_count()
+    if args.multiprocessing_distributed:
+        # Since we have ngpus_per_node processes per node,
+        # the total world_size needs to be adjusted accordingly
+        args.world_size = ngpus_per_node * args.world_size
+        # Use torch.multiprocessing.spawn to launch distributed processes:
+        # the main_worker process function, one process per GPU
+        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
+    else:
+        # Simply call main_worker function
+        # args.gpu defaults to None; without distributed training it selects the GPU to compute on
+        main_worker(args.gpu, ngpus_per_node, args)
+
+
+def main_worker(gpu, ngpus_per_node, args):
+    args.gpu = gpu
+
+    # suppress printing if not first GPU on each node
+    if args.multiprocessing_distributed and (args.gpu != 0 or args.rank != 0):
+        def print_pass(*args):
+            pass
+
+        builtins.print = print_pass
+
+    if args.gpu is not None:
+        print("Use GPU: {} for training".format(args.gpu))
+
+    if args.distributed:
+        if args.dist_url == "env://" and args.rank == -1:
+            args.rank = int(os.environ["RANK"])
+        if args.multiprocessing_distributed:
+            # For multiprocessing distributed training, rank needs to be the
+            # global rank among all the processes
+            args.rank = args.rank * ngpus_per_node + gpu
+        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
+                                world_size=args.world_size, rank=args.rank)
+        torch.distributed.barrier()
+    # create model
+    print("=> creating model '{}'".format(args.arch))
+    if args.arch.startswith('vit'):
+        model = moco.builder.MoCo_ViT(
+            partial(vits.__dict__[args.arch], stop_grad_conv1=args.stop_grad_conv1),
+            args.moco_dim, args.moco_mlp_dim, args.moco_t)
+        if args.basic_state_dict is not None:  # Transfer-learning
+            try:
+                basic_state_dict = torch.load(args.basic_state_dict)
+                if 'model' in basic_state_dict:
+                    basic_state_dict = basic_state_dict['model']
+                model.load_state_dict(basic_state_dict, strict=False)
+
+            except Exception:
+                print('error in args.basic_state_dict:', args.basic_state_dict)
+                print('PreTuning Restart')  # the weights failed to load
+
+            else:
+                print('PreTuning with Transfer-learning with:', args.basic_state_dict)
+
+        else:
+            print('PreTuning Restart')
+
+    else:
+        model = moco.builder.MoCo_ResNet(
+            partial(torchvision_models.__dict__[args.arch], zero_init_residual=True),
+            args.moco_dim, args.moco_mlp_dim, args.moco_t)
+
+    # infer learning rate before changing batch size
+    args.lr = args.lr * args.batch_size / 256
+
+    if not torch.cuda.is_available():
+        print('using CPU, this will be slow')
+    elif args.distributed:
+        # apply SyncBN
+        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
+        # For multiprocessing distributed, DistributedDataParallel constructor
+        # should always set the single device scope, otherwise,
+        # DistributedDataParallel will use all available devices.
+ if args.gpu is not None: + torch.cuda.set_device(args.gpu) + model.cuda(args.gpu) + # When using a single GPU per process and per + # DistributedDataParallel, we need to divide the batch size + # ourselves based on the total number of GPUs we have + args.batch_size = int(args.batch_size / args.world_size) + args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node) + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) + else: + model.cuda() + # DistributedDataParallel will divide and allocate batch_size to all + # available GPUs if device_ids are not set + model = torch.nn.parallel.DistributedDataParallel(model) + elif args.gpu is not None: + torch.cuda.set_device(args.gpu) + model = model.cuda(args.gpu) + # comment out the following line for debugging + raise NotImplementedError("Only DistributedDataParallel is supported.") + else: + # AllGather/rank implementation in this code only supports DistributedDataParallel. + raise NotImplementedError("Only DistributedDataParallel is supported.") + print(model) # print model after SyncBatchNorm + + if args.optimizer == 'lars': + optimizer = moco.optimizer.LARS(model.parameters(), args.lr, + weight_decay=args.weight_decay, + momentum=args.momentum) + elif args.optimizer == 'adamw': + optimizer = torch.optim.AdamW(model.parameters(), args.lr, + weight_decay=args.weight_decay) + + scaler = torch.cuda.amp.GradScaler() + summary_writer = SummaryWriter() if args.rank == 0 else None + + # optionally resume from a checkpoint + if args.resume: + if os.path.isfile(args.resume): + print("=> loading checkpoint '{}'".format(args.resume)) + if args.gpu is None: + checkpoint = torch.load(args.resume) + else: + # Map model to be loaded to specified single gpu. + loc = 'cuda:{}'.format(args.gpu) + checkpoint = torch.load(args.resume, map_location=loc) + args.start_epoch = checkpoint['epoch'] + model.load_state_dict(checkpoint['state_dict']) + optimizer.load_state_dict(checkpoint['optimizer']) + scaler.load_state_dict(checkpoint['scaler']) + print("=> loaded checkpoint '{}' (epoch {})" + .format(args.resume, checkpoint['epoch'])) + else: + print("=> no checkpoint found at '{}'".format(args.resume)) + + cudnn.benchmark = True + + # Data loading code + traindir = args.data + normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]) + + # follow BYOL's augmentation recipe: https://arxiv.org/abs/2006.07733 + augmentation1 = [ + transforms.RandomResizedCrop(224, scale=(args.crop_min, 1.)), + transforms.RandomApply([ + transforms.ColorJitter(0.4, 0.4, 0.2, 0.1) # not strengthened + ], p=0.8), + transforms.RandomGrayscale(p=0.2), + transforms.RandomApply([moco.loader.GaussianBlur([.1, 2.])], p=1.0), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + normalize + ] + + augmentation2 = [ + transforms.RandomResizedCrop(224, scale=(args.crop_min, 1.)), + transforms.RandomApply([ + transforms.ColorJitter(0.4, 0.4, 0.2, 0.1) # not strengthened + ], p=0.8), + transforms.RandomGrayscale(p=0.2), + transforms.RandomApply([moco.loader.GaussianBlur([.1, 2.])], p=0.1), + transforms.RandomApply([moco.loader.Solarize()], p=0.2), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + normalize + ] + + train_dataset = datasets.ImageFolder( + traindir, + moco.loader.TwoCropsTransform(transforms.Compose(augmentation1), + transforms.Compose(augmentation2))) + + if args.distributed: + train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) + else: + train_sampler = None + + 
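+    # drop_last=True in the loader below matters: the contrastive loss builds
+    # its targets from the per-GPU batch size, so a short trailing batch would
+    # misalign the labels gathered across ranks.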
train_loader = torch.utils.data.DataLoader( + train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), + num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True) + + for epoch in range(args.start_epoch, args.epochs): + if args.distributed: + train_sampler.set_epoch(epoch) + + # train for one epoch + train(train_loader, model, optimizer, scaler, summary_writer, epoch, args) + + if not args.multiprocessing_distributed or (args.multiprocessing_distributed + and args.rank == 0): # only the first GPU saves checkpoint + initial_setting = os.path.split(args.basic_state_dict)[1].split('.')[0] # 'ViT_b16_224_Imagenet' + dataset_using = os.path.split(args.data)[1] + if args.saveckp_freq and (epoch % args.saveckp_freq == 0 or epoch + 1 == args.epochs): + save_checkpoint({ + 'epoch': epoch + 1, + 'arch': args.arch, + 'state_dict': model.state_dict(), + 'optimizer': optimizer.state_dict(), + 'scaler': scaler.state_dict(), + }, is_best=False, + filename=os.path.join('/home/CPIA/saved_models/MOCO', 'moco_' + initial_setting + '_' + dataset_using + '_checkpoint_%04d.pth.tar' % epoch)) + print('moco_' + initial_setting + '_' + dataset_using + '_checkpoint_%04d.pth.tar' % epoch) + + + if args.rank == 0: + summary_writer.close() + + +def train(train_loader, model, optimizer, scaler, summary_writer, epoch, args): + batch_time = AverageMeter('Time', ':6.3f') + data_time = AverageMeter('Data', ':6.3f') + learning_rates = AverageMeter('LR', ':.4e') + losses = AverageMeter('Loss', ':.4e') + progress = ProgressMeter( + len(train_loader), + [batch_time, data_time, learning_rates, losses], + prefix="Epoch: [{}]".format(epoch)) + + # switch to train mode + model.train() + + end = time.time() + iters_per_epoch = len(train_loader) + moco_m = args.moco_m + for i, (images, _) in enumerate(train_loader): + # measure data loading time + data_time.update(time.time() - end) + + # adjust learning rate and momentum coefficient per iteration + lr = adjust_learning_rate(optimizer, epoch + i / iters_per_epoch, args) + learning_rates.update(lr) + if args.moco_m_cos: + moco_m = adjust_moco_momentum(epoch + i / iters_per_epoch, args) + + if args.gpu is not None: + images[0] = images[0].cuda(args.gpu, non_blocking=True) + images[1] = images[1].cuda(args.gpu, non_blocking=True) + + # compute output + with torch.cuda.amp.autocast(True): + loss = model(images[0], images[1], moco_m) + + losses.update(loss.item(), images[0].size(0)) + if args.rank == 0: + summary_writer.add_scalar("loss", loss.item(), epoch * iters_per_epoch + i) + + # compute gradient and do SGD step + optimizer.zero_grad() + scaler.scale(loss).backward() + scaler.step(optimizer) + scaler.update() + + # measure elapsed time + batch_time.update(time.time() - end) + end = time.time() + + if i % args.print_freq == 0: + progress.display(i) + + +def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'): + torch.save(state, filename) + if is_best: + shutil.copyfile(filename, 'model_best.pth.tar') + + +class AverageMeter(object): + """Computes and stores the average and current value""" + + def __init__(self, name, fmt=':f'): + self.name = name + self.fmt = fmt + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + def __str__(self): + fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' + return fmtstr.format(**self.__dict__) + + +class 
ProgressMeter(object):
+    def __init__(self, num_batches, meters, prefix=""):
+        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
+        self.meters = meters
+        self.prefix = prefix
+
+    def display(self, batch):
+        entries = [self.prefix + self.batch_fmtstr.format(batch)]
+        entries += [str(meter) for meter in self.meters]
+        print('\t'.join(entries))
+
+    def _get_batch_fmtstr(self, num_batches):
+        num_digits = len(str(num_batches // 1))
+        fmt = '{:' + str(num_digits) + 'd}'
+        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
+
+
+def adjust_learning_rate(optimizer, epoch, args):
+    """Decays the learning rate with half-cycle cosine after warmup"""
+    if epoch < args.warmup_epochs:
+        lr = args.lr * epoch / args.warmup_epochs
+    else:
+        lr = args.lr * 0.5 * (
+                1. + math.cos(math.pi * (epoch - args.warmup_epochs) / (args.epochs - args.warmup_epochs)))
+    for param_group in optimizer.param_groups:
+        param_group['lr'] = lr
+    return lr
+
+
+def adjust_moco_momentum(epoch, args):
+    """Adjust moco momentum based on current epoch"""
+    m = 1. - 0.5 * (1. + math.cos(math.pi * epoch / args.epochs)) * (1. - args.moco_m)
+    return m
+
+
+if __name__ == '__main__':
+    main()
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/misc.py b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/misc.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc64f1b317eaa7985adfd5ab99d2c9ef1131fd42
--- /dev/null
+++ b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/misc.py
@@ -0,0 +1,390 @@
+"""
+funcs Script ver: Aug 15th 19:00
+the loss backward step has been modified (gradient clipping integrated)
+"""
+import builtins
+import datetime
+import os
+import time
+from collections import defaultdict, deque
+from pathlib import Path
+
+import torch
+import torch.distributed as dist
+from torch._six import inf  # note: torch._six was removed in recent PyTorch; math.inf is the drop-in substitute
+
+
+# SmoothedValue operator
+class SmoothedValue(object):
+    """Track a series of values and provide access to smoothed values over a
+    window or the global series average.
+    """
+
+    def __init__(self, window_size=20, fmt=None):
+        if fmt is None:
+            fmt = "{median:.4f} ({global_avg:.4f})"
+        self.deque = deque(maxlen=window_size)
+        self.total = 0.0
+        self.count = 0
+        self.fmt = fmt
+
+    def update(self, value, n=1):
+        self.deque.append(value)
+        self.count += n
+        self.total += value * n
+
+    def synchronize_between_processes(self):
+        """
+        Warning: does not synchronize the deque!
+        """
+        if not is_dist_avail_and_initialized():
+            return
+        t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
+        dist.barrier()
+        dist.all_reduce(t)
+        t = t.tolist()
+        self.count = int(t[0])
+        self.total = t[1]
+
+    @property
+    def median(self):
+        d = torch.tensor(list(self.deque))
+        return d.median().item()
+
+    @property
+    def avg(self):
+        d = torch.tensor(list(self.deque), dtype=torch.float32)
+        return d.mean().item()
+
+    @property
+    def global_avg(self):
+        return self.total / self.count
+
+    @property
+    def max(self):
+        return max(self.deque)
+
+    @property
+    def value(self):
+        return self.deque[-1]
+
+    def __str__(self):
+        return self.fmt.format(
+            median=self.median,
+            avg=self.avg,
+            global_avg=self.global_avg,
+            max=self.max,
+            value=self.value)
+
+
+class MetricLogger(object):
+    def __init__(self, delimiter="\t"):
+        self.meters = defaultdict(SmoothedValue)  # SmoothedValue operator
+        self.delimiter = delimiter
+
+    def update(self, **kwargs):  # update the tracked metric dict
+        for k, v in kwargs.items():
+            if v is None:
+                continue
+            if isinstance(v, torch.Tensor):
+                v = v.item()
+            assert isinstance(v, (float, int))
+            self.meters[k].update(v)
+
+    def __getattr__(self, attr):  # raise on unknown attributes
+        if attr in self.meters:
+            return self.meters[attr]
+        if attr in self.__dict__:
+            return self.__dict__[attr]
+        raise AttributeError("'{}' object has no attribute '{}'".format(
+            type(self).__name__, attr))
+
+    def __str__(self):  # render as str for print
+        loss_str = []
+        for name, meter in self.meters.items():
+            loss_str.append(
+                "{}: {}".format(name, str(meter))
+            )
+        return self.delimiter.join(loss_str)
+
+    def synchronize_between_processes(self):  # synchronize meters across processes
+        for meter in self.meters.values():
+            meter.synchronize_between_processes()
+
+    def add_meter(self, name, meter):  # register an additional indicator
+        self.meters[name] = meter
+
+    def log_every(self, iterable, print_freq, header=None):  # wrap a minibatch iterable with logging
+        # initialize the iteration index
+        i = 0
+        # initialize the header
+        if not header:
+            header = ''
+
+        # initialize the timers
+        start_time = time.time()
+        end = time.time()
+        iter_time = SmoothedValue(fmt='{avg:.4f}')
+        data_time = SmoothedValue(fmt='{avg:.4f}')
+        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
+        # initialize the log message
+        log_msg = [
+            header,
+            '[{0' + space_fmt + '}/{1}]',
+            'eta: {eta}',
+            '{meters}',
+            'time: {time}',
+            'data: {data}'
+        ]
+        if torch.cuda.is_available():
+            log_msg.append('max mem: {memory:.0f}')
+
+        log_msg = self.delimiter.join(log_msg)  # join the fields with the delimiter
+
+        MB = 1024.0 * 1024.0
+
+        for obj in iterable:
+
+            data_time.update(time.time() - end)
+            yield obj  # yield the next object of the iteration
+            iter_time.update(time.time() - end)
+
+            if i % print_freq == 0 or i == len(iterable) - 1:
+                # estimate the remaining time
+                eta_seconds = iter_time.global_avg * (len(iterable) - i)
+                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
+                # print the log line
+                if torch.cuda.is_available():
+                    print(log_msg.format(
+                        i, len(iterable), eta=eta_string,
+                        meters=str(self),
+                        time=str(iter_time), data=str(data_time),
+                        memory=torch.cuda.max_memory_allocated() / MB))
+                else:
+                    print(log_msg.format(
+                        i, len(iterable), eta=eta_string,
+                        meters=str(self),
+                        time=str(iter_time), data=str(data_time)))
+            i += 1
+            end = time.time()
+
+        total_time = time.time() - start_time
+        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
+        print('{} Total time: {} ({:.4f} s / it)'.format(
+            header, total_time_str, total_time / len(iterable)))
+
+
+def setup_for_distributed(is_master):
+    """
+    This function disables printing when not in master process
+    """
+    builtin_print = builtins.print
+
+    def print(*args, **kwargs):
+        force = kwargs.pop('force', False)
+        force = force or (get_world_size() > 8)
+        if is_master or force:
+            now = datetime.datetime.now().time()
+            builtin_print('[{}] '.format(now), end='')  # print with time stamp
+            builtin_print(*args, **kwargs)
+
+    builtins.print = print
+
+
+def is_dist_avail_and_initialized():
+    if not dist.is_available():
+        return False
+    if not dist.is_initialized():
+        return False
+    return True
+
+
+def get_world_size():
+    if not is_dist_avail_and_initialized():
+        return 1
+    return dist.get_world_size()
+
+
+def get_rank():
+    if not is_dist_avail_and_initialized():
+        return 0
+    return dist.get_rank()
+
+
+def is_main_process():
+    return get_rank() == 0
+
+
+def save_on_master(*args, **kwargs):
+    if is_main_process():
+        torch.save(*args, **kwargs)
+
+
+def init_distributed_mode(args):
+    """
+    Configure the multi-node environment variables and set args.distributed accordingly
+    :param args:
+    :return:
+    """
+    if args.dist_on_itp:
+        args.rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
+        args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
+        args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
+        args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT'])
+        os.environ['LOCAL_RANK'] = str(args.gpu)
+        os.environ['RANK'] = str(args.rank)
+        os.environ['WORLD_SIZE'] = str(args.world_size)
+        # ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"]
+
+    elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
+        args.rank = int(os.environ["RANK"])
+        args.world_size = int(os.environ['WORLD_SIZE'])
+        args.gpu = int(os.environ['LOCAL_RANK'])
+
+    elif 'SLURM_PROCID' in os.environ:
+        args.rank = int(os.environ['SLURM_PROCID'])
+        args.gpu = args.rank % torch.cuda.device_count()
+
+    else:
+        print('Not using distributed mode')
+        setup_for_distributed(is_master=True)  # hack
+        args.distributed = False
+        return
+
+    args.distributed = True
+
+    torch.cuda.set_device(args.gpu)
+    args.dist_backend = 'nccl'
+    print('| distributed init (rank {}): {}, gpu {}'.format(
+        args.rank, args.dist_url, args.gpu), flush=True)
+    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
+                                         world_size=args.world_size, rank=args.rank)
+    torch.distributed.barrier()
+    setup_for_distributed(args.rank == 0)
+
+
+class NativeScalerWithGradNormCount:
+    """
+    Loss scaler for automatic mixed precision (AMP) training,
+    extended with optional gradient clipping
+    """
+    state_dict_key = "amp_scaler"
+
+    def __init__(self):
+        # automatic mixed precision
+        self._scaler = torch.cuda.amp.GradScaler()
+
+    def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):
+
+        # backward pass
+        # fixme: torch.ones_like(loss) was once added here to work around a gradient-scalar
+        # fixme: issue of unclear origin, but it later proved unnecessary and was reverted
+        self._scaler.scale(loss).backward(create_graph=create_graph)  # create_graph
+
+        if update_grad:
+            # gradient clipping
+            if clip_grad is not None:
+                assert parameters is not None
+                self._scaler.unscale_(optimizer)  # unscale the gradients of optimizer's assigned params in-place
+                norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)
+            else:
+                self._scaler.unscale_(optimizer)
+                norm = get_grad_norm_(parameters)
+
+            self._scaler.step(optimizer)  # update the model through the optimizer
+
+            self._scaler.update()
+        else:
+            norm = None
+
+        return norm
+
+    def state_dict(self):  # export the scaler's state_dict (the AMP scaling state, not the gradients)
+        return self._scaler.state_dict()
+
+    def load_state_dict(self, state_dict):  # restore the scaler state_dict from a checkpoint
+        self._scaler.load_state_dict(state_dict)
+
+
+def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor:
+
+    if isinstance(parameters, torch.Tensor):
+        parameters = [parameters]
+
+    # keep only the parameters that actually carry gradients
+    parameters = [p for p in parameters if p.grad is not None]
+    norm_type = float(norm_type)
+
+    if len(parameters) == 0:
+        return torch.tensor(0.)
+
+    # run the reduction on the device the gradients live on
+    device = parameters[0].grad.device
+
+    if norm_type == inf:
+        # for the infinity norm, take the largest absolute gradient entry
+        total_norm = max(p.grad.detach().abs().max().to(device) for p in parameters)
+    else:
+        # otherwise stack the per-parameter norms and reduce them with the requested norm
+        total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), norm_type)
+
+    return total_norm
+
+
+def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, model_idx='SAE_'):
+    output_dir = Path(args.output_dir)
+    epoch_name = str(epoch)
+
+    if loss_scaler is not None:
+        checkpoint_paths = [output_dir / (model_idx + '_checkpoint-%s.pth' % epoch_name)]
+        for checkpoint_path in checkpoint_paths:
+            to_save = {
+                'model': model_without_ddp.state_dict(),
+                'optimizer': optimizer.state_dict(),
+                'epoch': epoch,
+                'scaler': loss_scaler.state_dict(),
+                'args': args,  # the run config is saved for reference but not restored at load time
+            }
+
+            save_on_master(to_save, checkpoint_path)
+    else:
+        client_state = {'epoch': epoch}
+        model.save_checkpoint(save_dir=args.output_dir, tag="checkpoint-%s" % epoch_name, client_state=client_state)
+
+
+def load_model(args, model_without_ddp, optimizer, loss_scaler):
+
+    # resume from the checkpoint path in args.resume; by default nothing is loaded
+    if args.resume:
+        if args.resume.startswith('https'):
+            checkpoint = torch.hub.load_state_dict_from_url(
+                args.resume, map_location='cpu', check_hash=True)
+        else:
+            checkpoint = torch.load(args.resume, map_location='cpu')
+
+        model_without_ddp.load_state_dict(checkpoint['model'])
+
+        print("Resume checkpoint %s" % args.resume)
+
+        if 'optimizer' in checkpoint and 'epoch' in checkpoint and not (hasattr(args, 'eval') and args.eval):
+            optimizer.load_state_dict(checkpoint['optimizer'])
+            args.start_epoch = checkpoint['epoch'] + 1
+
+            if 'scaler' in checkpoint:
+                loss_scaler.load_state_dict(checkpoint['scaler'])
+
+            print("With optim & sched!")
+
+
+# average a value across all processes (i.e., the mean per-GPU loss)
+def all_reduce_mean(x):
+    world_size = get_world_size()
+
+    if world_size > 1:
+        x_reduce = torch.tensor(x).cuda()
+        dist.all_reduce(x_reduce)
+        x_reduce /= world_size
+        return x_reduce.item()
+
+    else:
+        return x
\ No newline at end of file
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/moco/__init__.py b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/moco/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..168f9979a4623806934b0ff1102ac166704e7dec
--- /dev/null
+++ b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/moco/__init__.py
@@ -0,0 +1 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
All Rights Reserved diff --git a/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/moco/builder.py b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/moco/builder.py new file mode 100644 index 0000000000000000000000000000000000000000..268bd833d33d67deb65bb58d4974897bd176941a --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/moco/builder.py @@ -0,0 +1,137 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +import torch.nn as nn + + +class MoCo(nn.Module): + """ + Build a MoCo model with a base encoder, a momentum encoder, and two MLPs + https://arxiv.org/abs/1911.05722 + """ + def __init__(self, base_encoder, dim=256, mlp_dim=4096, T=1.0): + """ + dim: feature dimension (default: 256) + mlp_dim: hidden dimension in MLPs (default: 4096) + T: softmax temperature (default: 1.0) + """ + super(MoCo, self).__init__() + + self.T = T + + # build encoders + self.base_encoder = base_encoder(num_classes=mlp_dim) + self.momentum_encoder = base_encoder(num_classes=mlp_dim) + + self._build_projector_and_predictor_mlps(dim, mlp_dim) + + for param_b, param_m in zip(self.base_encoder.parameters(), self.momentum_encoder.parameters()): + param_m.data.copy_(param_b.data) # initialize + param_m.requires_grad = False # not update by gradient + + def _build_mlp(self, num_layers, input_dim, mlp_dim, output_dim, last_bn=True): + mlp = [] + for l in range(num_layers): + dim1 = input_dim if l == 0 else mlp_dim + dim2 = output_dim if l == num_layers - 1 else mlp_dim + + mlp.append(nn.Linear(dim1, dim2, bias=False)) + + if l < num_layers - 1: + mlp.append(nn.BatchNorm1d(dim2)) + mlp.append(nn.ReLU(inplace=True)) + elif last_bn: + # follow SimCLR's design: https://github.com/google-research/simclr/blob/master/model_util.py#L157 + # for simplicity, we further removed gamma in BN + mlp.append(nn.BatchNorm1d(dim2, affine=False)) + + return nn.Sequential(*mlp) + + def _build_projector_and_predictor_mlps(self, dim, mlp_dim): + pass + + @torch.no_grad() + def _update_momentum_encoder(self, m): + """Momentum update of the momentum encoder""" + for param_b, param_m in zip(self.base_encoder.parameters(), self.momentum_encoder.parameters()): + param_m.data = param_m.data * m + param_b.data * (1. 
- m) + + def contrastive_loss(self, q, k): + # normalize + q = nn.functional.normalize(q, dim=1) + k = nn.functional.normalize(k, dim=1) + # gather all targets + k = concat_all_gather(k) + # Einstein sum is more intuitive + logits = torch.einsum('nc,mc->nm', [q, k]) / self.T + N = logits.shape[0] # batch size per GPU + labels = (torch.arange(N, dtype=torch.long) + N * torch.distributed.get_rank()).cuda() + return nn.CrossEntropyLoss()(logits, labels) * (2 * self.T) + + def forward(self, x1, x2, m): + """ + Input: + x1: first views of images + x2: second views of images + m: moco momentum + Output: + loss + """ + + # compute features + q1 = self.predictor(self.base_encoder(x1)) + q2 = self.predictor(self.base_encoder(x2)) + + with torch.no_grad(): # no gradient + self._update_momentum_encoder(m) # update the momentum encoder + + # compute momentum features as targets + k1 = self.momentum_encoder(x1) + k2 = self.momentum_encoder(x2) + + return self.contrastive_loss(q1, k2) + self.contrastive_loss(q2, k1) + + +class MoCo_ResNet(MoCo): + def _build_projector_and_predictor_mlps(self, dim, mlp_dim): + hidden_dim = self.base_encoder.fc.weight.shape[1] + del self.base_encoder.fc, self.momentum_encoder.fc # remove original fc layer + + # projectors + self.base_encoder.fc = self._build_mlp(2, hidden_dim, mlp_dim, dim) + self.momentum_encoder.fc = self._build_mlp(2, hidden_dim, mlp_dim, dim) + + # predictor + self.predictor = self._build_mlp(2, dim, mlp_dim, dim, False) + + +class MoCo_ViT(MoCo): + def _build_projector_and_predictor_mlps(self, dim, mlp_dim): + hidden_dim = self.base_encoder.head.weight.shape[1] + del self.base_encoder.head, self.momentum_encoder.head # remove original fc layer + + # projectors + self.base_encoder.head = self._build_mlp(3, hidden_dim, mlp_dim, dim) + self.momentum_encoder.head = self._build_mlp(3, hidden_dim, mlp_dim, dim) + + # predictor + self.predictor = self._build_mlp(2, dim, mlp_dim, dim) + + +# utils +@torch.no_grad() +def concat_all_gather(tensor): + """ + Performs all_gather operation on the provided tensors. + *** Warning ***: torch.distributed.all_gather has no gradient. + """ + tensors_gather = [torch.ones_like(tensor) + for _ in range(torch.distributed.get_world_size())] + torch.distributed.all_gather(tensors_gather, tensor, async_op=False) + + output = torch.cat(tensors_gather, dim=0) + return output diff --git a/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/moco/loader.py b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/moco/loader.py new file mode 100644 index 0000000000000000000000000000000000000000..a1b931a7f5573ea3108cdc5820fd39ee76fe2dd9 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/moco/loader.py @@ -0,0 +1,42 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
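+
+# MoCo v3's two-view data pipeline: TwoCropsTransform produces the query/key
+# crops, while GaussianBlur (SimCLR-style) and Solarize (BYOL-style) supply
+# the heavier augmentations composed in main_moco.py.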
+ +from PIL import Image, ImageFilter, ImageOps +import math +import random +import torchvision.transforms.functional as tf + + +class TwoCropsTransform: + """Take two random crops of one image""" + + def __init__(self, base_transform1, base_transform2): + self.base_transform1 = base_transform1 + self.base_transform2 = base_transform2 + + def __call__(self, x): + im1 = self.base_transform1(x) + im2 = self.base_transform2(x) + return [im1, im2] + + +class GaussianBlur(object): + """Gaussian blur augmentation from SimCLR: https://arxiv.org/abs/2002.05709""" + + def __init__(self, sigma=[.1, 2.]): + self.sigma = sigma + + def __call__(self, x): + sigma = random.uniform(self.sigma[0], self.sigma[1]) + x = x.filter(ImageFilter.GaussianBlur(radius=sigma)) + return x + + +class Solarize(object): + """Solarize augmentation from BYOL: https://arxiv.org/abs/2006.07733""" + + def __call__(self, x): + return ImageOps.solarize(x) \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/moco/optimizer.py b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/moco/optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..dd3995a630f094db20e4f8b68e8550673f40166f --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/moco/optimizer.py @@ -0,0 +1,43 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch + + +class LARS(torch.optim.Optimizer): + """ + LARS optimizer, no rate scaling or weight decay for parameters <= 1D. + """ + def __init__(self, params, lr=0, weight_decay=0, momentum=0.9, trust_coefficient=0.001): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum, trust_coefficient=trust_coefficient) + super().__init__(params, defaults) + + @torch.no_grad() + def step(self): + for g in self.param_groups: + for p in g['params']: + dp = p.grad + + if dp is None: + continue + + if p.ndim > 1: # if not normalization gamma/beta or bias + dp = dp.add(p, alpha=g['weight_decay']) + param_norm = torch.norm(p) + update_norm = torch.norm(dp) + one = torch.ones_like(param_norm) + q = torch.where(param_norm > 0., + torch.where(update_norm > 0, + (g['trust_coefficient'] * param_norm / update_norm), one), + one) + dp = dp.mul(q) + + param_state = self.state[p] + if 'mu' not in param_state: + param_state['mu'] = torch.zeros_like(p) + mu = param_state['mu'] + mu.mul_(g['momentum']).add_(dp) + p.add_(mu, alpha=-g['lr']) diff --git a/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/transfer/README.md b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/transfer/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1b2e10ac6b002d6a0ac94e550f1aa5b73bda0ef9 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/transfer/README.md @@ -0,0 +1,128 @@ +## MoCo v3 Transfer Learning with ViT + +This folder includes the transfer learning experiments on CIFAR-10, CIFAR-100, Flowers and Pets datasets. We provide finetuning recipes for the ViT-Base model. + +### Transfer Results + +The following results are based on ImageNet-1k self-supervised pre-training, followed by end-to-end fine-tuning on downstream datasets. All results are based on a batch size of 128 and 100 training epochs. + +#### ViT-Base, transfer learning + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| dataset | pretrain epochs | pretrain crops | finetune epochs | transfer acc |
+|:---|:---:|:---:|:---:|:---:|
+| CIFAR-10 | 300 | 2x224 | 100 | 98.9 |
+| CIFAR-100 | 300 | 2x224 | 100 | 90.5 |
+| Flowers | 300 | 2x224 | 100 | 97.7 |
+| Pets | 300 | 2x224 | 100 | 93.2 |
+ +Similar to the end-to-end fine-tuning experiment on ImageNet, the transfer learning results are also obtained using the [DEiT](https://github.com/facebookresearch/deit) repo, with the default model [deit_base_patch16_224]. + +### Preparation: Transfer learning with ViT + +To perform transfer learning for ViT, use our script to convert the pre-trained ViT checkpoint to [DEiT](https://github.com/facebookresearch/deit) format: +``` +python convert_to_deit.py \ + --input [your checkpoint path]/[your checkpoint file].pth.tar \ + --output [target checkpoint file].pth +``` +Then copy (or replace) the following files to the DeiT folder: +``` +datasets.py +oxford_flowers_dataset.py +oxford_pets_dataset.py +``` + +#### Download and prepare the datasets + +Pets [\[Homepage\]](https://www.robots.ox.ac.uk/~vgg/data/pets/) +``` +./data/ +└── ./data/pets/ + ├── ./data/pets/annotations/ # split and label files + └── ./data/pets/images/ # data images +``` + +Flowers [\[Homepage\]](https://www.robots.ox.ac.uk/~vgg/data/flowers/102/) +``` +./data/ +└── ./data/flowers/ + ├── ./data/flowers/jpg/ # jpg images + ├── ./data/flowers/setid.mat # dataset split + └── ./data/flowers/imagelabels.mat # labels +``` + + +CIFAR-10/CIFAR-100 datasets will be downloaded automatically. + + +### Transfer learning scripts (with a 8-GPU machine): + +#### CIFAR-10 +``` +python -u -m torch.distributed.launch --nproc_per_node=8 --use_env main.py \ + --batch-size 128 --output_dir [your output dir path] --epochs 100 --lr 3e-4 --weight-decay 0.1 \ + --no-pin-mem --warmup-epochs 3 --data-set cifar10 --data-path [cifar-10 data path] --no-repeated-aug \ + --resume [your pretrain checkpoint file] \ + --reprob 0.0 --drop-path 0.1 --mixup 0.8 --cutmix 1 +``` + +#### CIFAR-100 +``` +python -u -m torch.distributed.launch --nproc_per_node=8 --use_env main.py \ + --batch-size 128 --output_dir [your output dir path] --epochs 100 --lr 3e-4 --weight-decay 0.1 \ + --no-pin-mem --warmup-epochs 3 --data-set cifar100 --data-path [cifar-100 data path] --no-repeated-aug \ + --resume [your pretrain checkpoint file] \ + --reprob 0.0 --drop-path 0.1 --mixup 0.5 --cutmix 1 +``` + +#### Flowers +``` +python -u -m torch.distributed.launch --nproc_per_node=8 --use_env main.py \ + --batch-size 128 --output_dir [your output dir path] --epochs 100 --lr 3e-4 --weight-decay 0.3 \ + --no-pin-mem --warmup-epochs 3 --data-set flowers --data-path [oxford-flowers data path] --no-repeated-aug \ + --resume [your pretrain checkpoint file] \ + --reprob 0.25 --drop-path 0.1 --mixup 0 --cutmix 0 +``` + +#### Pets +``` +python -u -m torch.distributed.launch --nproc_per_node=8 --use_env main.py \ + --batch-size 128 --output_dir [your output dir path] --epochs 100 --lr 3e-4 --weight-decay 0.1 \ + --no-pin-mem --warmup-epochs 3 --data-set pets --data-path [oxford-pets data path] --no-repeated-aug \ + --resume [your pretrain checkpoint file] \ + --reprob 0 --drop-path 0 --mixup 0.8 --cutmix 0 +``` + +**Note**: +Similar to the ImageNet end-to-end finetuning experiment, we use `--resume` rather than `--finetune` in the DeiT repo, as its `--finetune` option trains under eval mode. When loading the pre-trained model, revise `model_without_ddp.load_state_dict(checkpoint['model'])` with `strict=False`. 
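+
+As a minimal sketch of that relaxed loading step (variable names follow the DeiT `main.py`; `checkpoint` is the file produced by `convert_to_deit.py` and is illustrative here):
+```
+# strict=False tolerates the classifier-head keys that differ between
+# the pre-trained checkpoint and the fine-tuning model
+msg = model_without_ddp.load_state_dict(checkpoint['model'], strict=False)
+print(msg.missing_keys)  # expect only the new head's parameters here
+```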
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/transfer/datasets.py b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/transfer/datasets.py new file mode 100644 index 0000000000000000000000000000000000000000..6c957d4f78dd1a682856740b478cf337e08e9eab --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/transfer/datasets.py @@ -0,0 +1,74 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import json +import os + +from torchvision import datasets, transforms +from torchvision.datasets.folder import ImageFolder, default_loader + +from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD + +import oxford_flowers_dataset, oxford_pets_dataset + + +def build_transform(is_train, args): + transform_train = transforms.Compose([ + transforms.RandomResizedCrop((args.input_size, args.input_size), scale=(0.05, 1.0)), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD), + ]) + transform_test = transforms.Compose([ + transforms.Resize(int((256 / 224) * args.input_size)), + transforms.CenterCrop(args.input_size), + transforms.ToTensor(), + transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD), + ]) + return transform_train if is_train else transform_test + + +def build_dataset(is_train, args): + transform = build_transform(is_train, args) + + if args.data_set == 'imagenet': + raise NotImplementedError("Only [cifar10, cifar100, flowers, pets] are supported; \ + for imagenet end-to-end finetuning, please refer to the instructions in the main README.") + + if args.data_set == 'imagenet': + root = os.path.join(args.data_path, 'train' if is_train else 'val') + dataset = datasets.ImageFolder(root, transform=transform) + nb_classes = 1000 + + elif args.data_set == 'cifar10': + dataset = datasets.CIFAR10(root=args.data_path, + train=is_train, + download=True, + transform=transform) + nb_classes = 10 + elif args.data_set == "cifar100": + dataset = datasets.CIFAR100(root=args.data_path, + train=is_train, + download=True, + transform=transform) + nb_classes = 100 + elif args.data_set == "flowers": + dataset = oxford_flowers_dataset.Flowers(root=args.data_path, + train=is_train, + download=False, + transform=transform) + nb_classes = 102 + elif args.data_set == "pets": + dataset = oxford_pets_dataset.Pets(root=args.data_path, + train=is_train, + download=False, + transform=transform) + nb_classes = 37 + else: + raise NotImplementedError("Only [cifar10, cifar100, flowers, pets] are supported; \ + for imagenet end-to-end finetuning, please refer to the instructions in the main README.") + + return dataset, nb_classes diff --git a/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/transfer/oxford_flowers_dataset.py b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/transfer/oxford_flowers_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..13f48a963407c732813db9af86c70110b478f99c --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/transfer/oxford_flowers_dataset.py @@ -0,0 +1,67 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+ +from __future__ import print_function +from PIL import Image +from typing import Any, Callable, Optional, Tuple + +import numpy as np +import os +import os.path +import pickle +import scipy.io + +from torchvision.datasets.vision import VisionDataset + + +class Flowers(VisionDataset): + + def __init__( + self, + root, + train=True, + transform=None, + target_transform=None, + download=False, + ): + + super(Flowers, self).__init__(root, transform=transform, + target_transform=target_transform) + + base_folder = root + self.image_folder = os.path.join(base_folder, "jpg") + label_file = os.path.join(base_folder, "imagelabels.mat") + setid_file = os.path.join(base_folder, "setid.mat") + + self.train = train + + self.labels = scipy.io.loadmat(label_file)["labels"][0] + train_list = scipy.io.loadmat(setid_file)["trnid"][0] + val_list = scipy.io.loadmat(setid_file)["valid"][0] + test_list = scipy.io.loadmat(setid_file)["tstid"][0] + trainval_list = np.concatenate([train_list, val_list]) + + if self.train: + self.img_files = trainval_list + else: + self.img_files = test_list + + + def __getitem__(self, index): + img_name = "image_%05d.jpg" % self.img_files[index] + target = self.labels[self.img_files[index] - 1] - 1 + img = Image.open(os.path.join(self.image_folder, img_name)) + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target + + def __len__(self): + return len(self.img_files) diff --git a/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/transfer/oxford_pets_dataset.py b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/transfer/oxford_pets_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..8aa026ff970c30d6ce2f9a2d10fa583adc9711e0 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/transfer/oxford_pets_dataset.py @@ -0,0 +1,67 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+ +from PIL import Image +from typing import Any, Callable, Optional, Tuple + +import numpy as np +import os +import os.path +import pickle +import scipy.io + +from torchvision.datasets.vision import VisionDataset + + +class Pets(VisionDataset): + + def __init__( + self, + root: str, + train: bool = True, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + + super(Pets, self).__init__(root, transform=transform, + target_transform=target_transform) + + base_folder = root + self.train = train + annotations_path_dir = os.path.join(base_folder, "annotations") + self.image_path_dir = os.path.join(base_folder, "images") + + if self.train: + split_file = os.path.join(annotations_path_dir, "trainval.txt") + with open(split_file) as f: + self.images_list = f.readlines() + else: + split_file = os.path.join(annotations_path_dir, "test.txt") + with open(split_file) as f: + self.images_list = f.readlines() + + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + + img_name, label, species, _ = self.images_list[index].strip().split(" ") + + img_name += ".jpg" + target = int(label) - 1 + + img = Image.open(os.path.join(self.image_path_dir, img_name)) + img = img.convert('RGB') + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target + + def __len__(self) -> int: + return len(self.images_list) diff --git a/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/vits.py b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/vits.py new file mode 100644 index 0000000000000000000000000000000000000000..11e22ba5a0ff606e7f101b1833b16ab9a823914a --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/moco-v3-main/vits.py @@ -0,0 +1,143 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import math +import torch +import torch.nn as nn +from functools import partial, reduce +from operator import mul + +from timm.models.vision_transformer import VisionTransformer, _cfg +from timm.models.layers.helpers import to_2tuple +from timm.models.layers import PatchEmbed + +__all__ = [ + 'vit_small', + 'vit_base', + 'vit_conv_small', + 'vit_conv_base', +] + + +class VisionTransformerMoCo(VisionTransformer): + def __init__(self, stop_grad_conv1=False, **kwargs): + super().__init__(**kwargs) + # Use fixed 2D sin-cos position embedding + self.build_2d_sincos_position_embedding() + + # weight initialization + for name, m in self.named_modules(): + if isinstance(m, nn.Linear): + if 'qkv' in name: + # treat the weights of Q, K, V separately + val = math.sqrt(6. / float(m.weight.shape[0] // 3 + m.weight.shape[1])) + nn.init.uniform_(m.weight, -val, val) + else: + nn.init.xavier_uniform_(m.weight) + nn.init.zeros_(m.bias) + nn.init.normal_(self.cls_token, std=1e-6) + + if isinstance(self.patch_embed, PatchEmbed): + # xavier_uniform initialization + val = math.sqrt(6. 
/ float(3 * reduce(mul, self.patch_embed.patch_size, 1) + self.embed_dim)) + nn.init.uniform_(self.patch_embed.proj.weight, -val, val) + nn.init.zeros_(self.patch_embed.proj.bias) + + if stop_grad_conv1: + self.patch_embed.proj.weight.requires_grad = False + self.patch_embed.proj.bias.requires_grad = False + + def build_2d_sincos_position_embedding(self, temperature=10000.): + h, w = self.patch_embed.grid_size + grid_w = torch.arange(w, dtype=torch.float32) + grid_h = torch.arange(h, dtype=torch.float32) + grid_w, grid_h = torch.meshgrid(grid_w, grid_h) + assert self.embed_dim % 4 == 0, 'Embed dimension must be divisible by 4 for 2D sin-cos position embedding' + pos_dim = self.embed_dim // 4 + omega = torch.arange(pos_dim, dtype=torch.float32) / pos_dim + omega = 1. / (temperature**omega) + out_w = torch.einsum('m,d->md', [grid_w.flatten(), omega]) + out_h = torch.einsum('m,d->md', [grid_h.flatten(), omega]) + pos_emb = torch.cat([torch.sin(out_w), torch.cos(out_w), torch.sin(out_h), torch.cos(out_h)], dim=1)[None, :, :] + + assert self.num_tokens == 1, 'Assuming one and only one token, [cls]' + pe_token = torch.zeros([1, 1, self.embed_dim], dtype=torch.float32) + self.pos_embed = nn.Parameter(torch.cat([pe_token, pos_emb], dim=1)) + self.pos_embed.requires_grad = False + + +class ConvStem(nn.Module): + """ + ConvStem, from Early Convolutions Help Transformers See Better, Tete et al. https://arxiv.org/abs/2106.14881 + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True): + super().__init__() + + assert patch_size == 16, 'ConvStem only supports patch size of 16' + assert embed_dim % 8 == 0, 'Embed dimension must be divisible by 8 for ConvStem' + + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + self.img_size = img_size + self.patch_size = patch_size + self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) + self.num_patches = self.grid_size[0] * self.grid_size[1] + self.flatten = flatten + + # build stem, similar to the design in https://arxiv.org/abs/2106.14881 + stem = [] + input_dim, output_dim = 3, embed_dim // 8 + for l in range(4): + stem.append(nn.Conv2d(input_dim, output_dim, kernel_size=3, stride=2, padding=1, bias=False)) + stem.append(nn.BatchNorm2d(output_dim)) + stem.append(nn.ReLU(inplace=True)) + input_dim = output_dim + output_dim *= 2 + stem.append(nn.Conv2d(input_dim, embed_dim, kernel_size=1)) + self.proj = nn.Sequential(*stem) + + self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() + + def forward(self, x): + B, C, H, W = x.shape + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." 
+ x = self.proj(x) + if self.flatten: + x = x.flatten(2).transpose(1, 2) # BCHW -> BNC + x = self.norm(x) + return x + + +def vit_small(**kwargs): + model = VisionTransformerMoCo( + patch_size=16, embed_dim=384, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model.default_cfg = _cfg() + return model + +def vit_base(**kwargs): + model = VisionTransformerMoCo( + patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model.default_cfg = _cfg() + return model + +def vit_conv_small(**kwargs): + # minus one ViT block + model = VisionTransformerMoCo( + patch_size=16, embed_dim=384, depth=11, num_heads=12, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), embed_layer=ConvStem, **kwargs) + model.default_cfg = _cfg() + return model + +def vit_conv_base(**kwargs): + # minus one ViT block + model = VisionTransformerMoCo( + patch_size=16, embed_dim=768, depth=11, num_heads=12, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), embed_layer=ConvStem, **kwargs) + model.default_cfg = _cfg() + return model \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simclr/LICENSE.txt b/PuzzleTuning/Counterpart PreTrain Methods/simclr/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..bd1b9289818f0672085d7b04bc217d79331c0bd7 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simclr/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Thalles Silva + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
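As a quick sanity check on the ViT factories above, the following sketch (not part of this repository, and assuming a `timm` version compatible with the imports in `vits.py`) builds the ConvStem variant and confirms that the 2D sin-cos position embedding stays frozen:

```python
# Hypothetical smoke test for vits.py; run it from the moco-v3-main directory.
import torch
import vits  # assumes vits.py is on the import path

model = vits.vit_conv_small()         # ViT-Small with the 4-layer ConvStem, depth 11
x = torch.randn(2, 3, 224, 224)       # ConvStem asserts a 224x224 input
out = model(x)
print(out.shape)                      # torch.Size([2, 1000]) with timm's default head
print(model.pos_embed.requires_grad)  # False: the sin-cos embedding is not trained
```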
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simclr/README.md b/PuzzleTuning/Counterpart PreTrain Methods/simclr/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..dc94e88615990ea21264c35b1f85722d06187c5e
--- /dev/null
+++ b/PuzzleTuning/Counterpart PreTrain Methods/simclr/README.md
@@ -0,0 +1,14 @@
+# SimCLR
+
+The original repo of SimCLR can be found [here](https://github.com/sthalles/SimCLR)
+
+To install the environment:
+```bash
+pip install -r requirements.txt
+```
+
+To start pretraining:
+```bash
+# You need to alter the script according to your directories
+bash pretrain.sh
+```
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simclr/README_origin.md b/PuzzleTuning/Counterpart PreTrain Methods/simclr/README_origin.md
new file mode 100644
index 0000000000000000000000000000000000000000..2d908618065dfe494a448029dd96be3999a000ce
--- /dev/null
+++ b/PuzzleTuning/Counterpart PreTrain Methods/simclr/README_origin.md
@@ -0,0 +1,47 @@
+# PyTorch SimCLR: A Simple Framework for Contrastive Learning of Visual Representations
+[![DOI](https://zenodo.org/badge/241184407.svg)](https://zenodo.org/badge/latestdoi/241184407)
+
+
+### Blog post with full documentation: [Exploring SimCLR: A Simple Framework for Contrastive Learning of Visual Representations](https://sthalles.github.io/simple-self-supervised-learning/)
+
+![Image of SimCLR Arch](https://sthalles.github.io/assets/contrastive-self-supervised/cover.png)
+
+### See also [PyTorch Implementation for BYOL - Bootstrap Your Own Latent: A New Approach to Self-Supervised Learning](https://github.com/sthalles/PyTorch-BYOL).
+
+## Installation
+
+```
+$ conda env create --name simclr --file env.yml
+$ conda activate simclr
+$ python run.py
+```
+
+## Config file
+
+Before running SimCLR, make sure you choose the correct running configurations; you can change them by passing keyword arguments to the ```run.py``` file.
+
+```bash
+
+$ python run.py -data ./datasets --dataset-name stl10 --log-every-n-steps 100 --epochs 100
+
+```
+
+If you want to run it on CPU (for debugging purposes) use the ```--disable-cuda``` option.
+
+For 16-bit precision GPU training, there is **no** need to install [NVIDIA apex](https://github.com/NVIDIA/apex). Just use the ```--fp16_precision``` flag and this implementation will use [PyTorch's built-in AMP training](https://pytorch.org/docs/stable/notes/amp_examples.html).
+
+## Feature Evaluation
+
+Feature evaluation is done using a linear model protocol.
+
+First, we learn features using SimCLR on the ```STL10 unsupervised``` set. Then, we train a linear classifier on top of the frozen features from SimCLR. The linear model is trained on features extracted from the ```STL10 train``` set and evaluated on the ```STL10 test``` set.
+
+Check the [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://github.com/sthalles/SimCLR/blob/simclr-refactor/feature_eval/mini_batch_logistic_regression_evaluator.ipynb) notebook for reproducibility.
+
+Note that SimCLR benefits from **longer training**.
+ +| Linear Classification | Dataset | Feature Extractor | Architecture | Feature dimensionality | Projection Head dimensionality | Epochs | Top1 % | +|----------------------------|---------|-------------------|---------------------------------------------------------------------------------|------------------------|--------------------------------|--------|--------| +| Logistic Regression (Adam) | STL10 | SimCLR | [ResNet-18](https://drive.google.com/open?id=14_nH2FkyKbt61cieQDiSbBVNP8-gtwgF) | 512 | 128 | 100 | 74.45 | +| Logistic Regression (Adam) | CIFAR10 | SimCLR | [ResNet-18](https://drive.google.com/open?id=1lc2aoVtrAetGn0PnTkOyFzPCIucOJq7C) | 512 | 128 | 100 | 69.82 | +| Logistic Regression (Adam) | STL10 | SimCLR | [ResNet-50](https://drive.google.com/open?id=1ByTKAUsdm_X7tLcii6oAEl5qFRqRMZSu) | 2048 | 128 | 50 | 70.075 | diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simclr/data_aug/contrastive_learning_dataset.py b/PuzzleTuning/Counterpart PreTrain Methods/simclr/data_aug/contrastive_learning_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..8fe8880e3e61723155e2b3366fe65d85bae4bb2f --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simclr/data_aug/contrastive_learning_dataset.py @@ -0,0 +1,53 @@ +from torchvision.transforms import transforms +from data_aug.gaussian_blur import GaussianBlur +from torchvision import transforms, datasets +from data_aug.view_generator import ContrastiveLearningViewGenerator +from exceptions.exceptions import InvalidDatasetSelection +import os + + +class ContrastiveLearningDataset: + def __init__(self, root_folder): + self.root_folder = root_folder + + @staticmethod + def get_simclr_pipeline_transform(size, s=1): + """Return a set of data augmentation transformations as described in the SimCLR paper.""" + color_jitter = transforms.ColorJitter(0.8 * s, 0.8 * s, 0.8 * s, 0.2 * s) + data_transforms = transforms.Compose([transforms.RandomResizedCrop(size=size), + transforms.RandomHorizontalFlip(), + transforms.RandomApply([color_jitter], p=0.8), + transforms.RandomGrayscale(p=0.2), + GaussianBlur(kernel_size=int(0.1 * size)), + transforms.ToTensor()]) + return data_transforms + + def get_dataset(self, name, n_views, mode='train', img_size=224): + valid_datasets = {'cifar10': lambda: datasets.CIFAR10(self.root_folder, train=True, + transform=ContrastiveLearningViewGenerator( + self.get_simclr_pipeline_transform(img_size), + n_views), + download=True), + + 'stl10': lambda: datasets.STL10(self.root_folder, split='unlabeled', + transform=ContrastiveLearningViewGenerator( + self.get_simclr_pipeline_transform(img_size), + n_views), + download=True), + 'imagefolder': lambda: datasets.ImageFolder(os.path.join(self.root_folder, mode), + transform=ContrastiveLearningViewGenerator( + self.get_simclr_pipeline_transform(img_size), + n_views)), + 'cpia-mini': lambda: datasets.ImageFolder(self.root_folder, + transform=ContrastiveLearningViewGenerator( + self.get_simclr_pipeline_transform(img_size), + n_views))} + + + try: + print(f'dataset: {name}') + dataset_fn = valid_datasets[name] + except KeyError: + raise InvalidDatasetSelection() + else: + return dataset_fn() diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simclr/data_aug/gaussian_blur.py b/PuzzleTuning/Counterpart PreTrain Methods/simclr/data_aug/gaussian_blur.py new file mode 100644 index 0000000000000000000000000000000000000000..e3ad45039790e5b96c101f3fcd21f4199330adbf --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain 
Methods/simclr/data_aug/gaussian_blur.py @@ -0,0 +1,48 @@ +import numpy as np +import torch +from torch import nn +from torchvision.transforms import transforms + +np.random.seed(0) + + +class GaussianBlur(object): + """blur a single image on CPU""" + def __init__(self, kernel_size): + radias = kernel_size // 2 + kernel_size = radias * 2 + 1 + self.blur_h = nn.Conv2d(3, 3, kernel_size=(kernel_size, 1), + stride=1, padding=0, bias=False, groups=3) + self.blur_v = nn.Conv2d(3, 3, kernel_size=(1, kernel_size), + stride=1, padding=0, bias=False, groups=3) + self.k = kernel_size + self.r = radias + + self.blur = nn.Sequential( + nn.ReflectionPad2d(radias), + self.blur_h, + self.blur_v + ) + + self.pil_to_tensor = transforms.ToTensor() + self.tensor_to_pil = transforms.ToPILImage() + + def __call__(self, img): + img = self.pil_to_tensor(img).unsqueeze(0) + + sigma = np.random.uniform(0.1, 2.0) + x = np.arange(-self.r, self.r + 1) + x = np.exp(-np.power(x, 2) / (2 * sigma * sigma)) + x = x / x.sum() + x = torch.from_numpy(x).view(1, -1).repeat(3, 1) + + self.blur_h.weight.data.copy_(x.view(3, 1, self.k, 1)) + self.blur_v.weight.data.copy_(x.view(3, 1, 1, self.k)) + + with torch.no_grad(): + img = self.blur(img) + img = img.squeeze() + + img = self.tensor_to_pil(img) + + return img \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simclr/data_aug/view_generator.py b/PuzzleTuning/Counterpart PreTrain Methods/simclr/data_aug/view_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..aa4c4a5adb6d671bae28f9d1a5a82731bb9b2bdc --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simclr/data_aug/view_generator.py @@ -0,0 +1,14 @@ +import numpy as np + +np.random.seed(0) + + +class ContrastiveLearningViewGenerator(object): + """Take two random crops of one image as the query and key.""" + + def __init__(self, base_transform, n_views=2): + self.base_transform = base_transform + self.n_views = n_views + + def __call__(self, x): + return [self.base_transform(x) for i in range(self.n_views)] diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simclr/dataset_test.ipynb b/PuzzleTuning/Counterpart PreTrain Methods/simclr/dataset_test.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..68184fa79541cf88010d5618a767576b6bd13ce5 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simclr/dataset_test.ipynb @@ -0,0 +1,312 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "dataset: local\n" + ] + } + ], + "source": [ + "from data_aug.contrastive_learning_dataset import ContrastiveLearningDataset\n", + "import torch\n", + "\n", + "\n", + "dataset = ContrastiveLearningDataset('/data/ImageFolderLike/pRCC_CLS')\n", + "\n", + "train_dataset = dataset.get_dataset('local', n_views=2, mode='train', img_size=224)\n", + "\n", + "train_loader = torch.utils.data.DataLoader(\n", + " train_dataset, batch_size=1, shuffle=True, pin_memory=True, drop_last=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "img_list = []\n", + "for images, _ in train_loader:\n", + " img_list = images\n", + " break" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAakAAAGhCAYAAADbf0s2AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOz9Taht3XYWjD6t9d7HGHOutfb7c5Kck9ycq2JBEP9AY0AhBBQFQRAsyLWiFtRCEtBT0Yg/xEqwloKiNS1owEpEsJCC4RoRBCEiQSSC4X7gVRKTnHPevdeac44xem/tFlprvY+593uSN9/VnPOSPc5Z71p7/cw5fnpvP0972tNIVRXvj/fH++P98f54f3wLHvzNPoH3x/vj/fH+eH+8P77R8d5JvT/eH++P98f741v2eO+k3h/vj/fH++P98S17vHdS74/3x/vj/fH++JY93jup98f74/3x/nh/fMse753U++P98f54f7w/vmWP907q/fH+eH+8P94f37LHeyf1/nh/vD/eH++Pb9njvZN6f7w/3h/vj/fHt+zx3km9P94f74/3x/vjW/b4pjmpf/AP/gF+62/9rViWBd/7vd+L//Af/sM361TeH++P98f74/3xLXp8U5zUP//n/xxf+cpX8Hf+zt/Bf/yP/xG/9/f+XvzxP/7H8b/+1//6ZpzO++P98f54f7w/vkUP+mYIzH7v934vvud7vgd//+//fQCAiODLX/4yfuiHfgh//a//9V/z70UE//N//k88PT2BiP5Pn+774/3x/nh/vD/+Nx+qijdv3uC7vuu7wPyN86X8G3hOAIBt2/AzP/Mz+OEf/uH+PWbGH/2jfxT//t//+0/9m3Vdsa5r//f/+B//A7/zd/7O/+Pn+v54f7w/3h/vj/+zx3//7/8d3/3d3/0Nf/4b7qR++Zd/Ga01fPGLX7z7/he/+EX83M/93Kf+zY/+6I/iR37kR975/j/6f/0dLGVGFYWI4s22Y22CSxM0AOoftTbUvWLbN4gqGMC5ZJymCR89nDDnjFPJIFVABHXfARX7IAITv5OxKTwBPSSi8T0CgYhAzCBmICWACAKgiaCJYN93iDRobYAIoAqCnRuB+uvY5+MHAQQoCKpAU4WqvTMRQERg9o+UAJDdg1ZRa8O671hrxd4a9ib9HjETEjNOuaCkhJwSVBqaCC7bhnWvuFXBrSlEFQ0AE4GJcCoJc0p4LAWnqWDOCXMe90zUns91V+yt4VIrtiZQVRATppSwlIw5MzIzCtu9Y2bwNCGVjOXphHKeMZ9nTKcZxARpgvW6Yrtu+ORrn+B23fDJJ894ua64bRtuewUAJLLnPaeEx6lgSowlJTD1BwcRRROBKPr9VFWoPxtVhfiza61hbYJdFDcBKgiVCMIJCvTnrKLIaEgAHhPhnBlLYpQ07g2BAaL+7BLZUyf7hq0jX399BapApQEQEPTwfY3LsYMIAPdvKAC1F+/vN1aWQtVeQvxaRXxtqd6tbSYCM4M4/tbWftzDqoIqilsTNAVE7deYCSUnTKWgTBmvvu0VlldnfOH/+UU8ffsHePz4CeePH5FLBufk1368NIVWe5/b6xvWlyuef+kTvP7Fr+Hy9Rd87Re+iuv1hjdvrrhstsY22B5pqtj2HbUJat0AFTApHnPClBKelhlzKf0jJ8aSC6apoJSMeS7glEAp+T2xtbdvO7brDXWrkNbMZvgzSSmBmVGmCSkl5Jz79UhtaLWitYrabK3EXiQif2YKabZnamt4rhVra3jeG1ZRVAUoZzAzUspgJZAqWBoyFBMDp8KYUsLD0wnL+YSnj17h1Rc/wunVA15950fIp4L5cQGXBCKgbjfc3jzj+vWv4/Uv/iL22xXSKubHRyxPr/DBl76EsizIZcb2csP+csEv/3/+v3j+6tfxK//X/4U3X/0qrq+/jtvlNbRVsOxIbkPz8oRUFqTTh0hlxtoEP/z//md4enrCr3b8hjup/zvHD//wD+MrX/lK//fr16/x5S9/Gaey4DTNaKLmqJCRRcFN0Iigbsxba2itYl03SBOoCJacMOeCOc+Yc8KSC0gVpIKm1A0UMbnh93SUwnlo3zixu+9xU7KNxoxUshkGZggUooBIg4hA23BS6uemopDWoP663ZQQ+XubAVM3AIr+LTcG3J1kGJHKFZUbCidMXM2YyHBSicwpzCUju5OSZueYkbClhnM149ydFMJJZZSUcJ4mzDlhSoySUrw1qigaKao0KBhZCApBE0VisvfjhCllFP9bToyUM+aHBXmecPrwCdN5xnSekYoZiu26QTeFsCBRAZMAVKAkaBDsao6QCUjNjL0igSghp4xMDCaCikDYDGxTc8I99vDAJdZDa4JGAqAhkUJI7T4AaJQgAKqaVRYIEhEmApacsOSEU2JMOYHdAfmCsmfWVw7s+UawAwLYPttfxFMTW7MHL6Qq/qU7ObXgSKGQuCQPLojIHbWvZzHHbOvTn3OT7oD8VC14IPK9wQdHYs5tb3YfM5uTan49RISSGWUqmOaC83LCaTnj8fyAp8dHPD494fz4hFySG823nJQo2t7Q9gbNAqWKCQmTJjQknNOElAGegYwdexPcRFFVsIuABdjRQBKORMEpgzKjccFGCaKEJkB2B584YyozpmVBygkpMVoVSGto1AAChJoFTWCI2F0mf6bMjMQFmRNKyshs90tZ0Lii1Yqdmq25g+0gv5+NBEwNTA1FGAJBlgplBSshZXOeJWW3X0CShkyKhRRzsfV2yjOWacFpOeHx8RHnV4/44KMPUc4TpscFqdh+bfuG/XzG+vCA8+mEuq4QqZhOZ8wPZ5w//AipFECBWxOstw0nuNOGImXGaZ6x0gNUGrTVWHSgPIHShAQGK6H5Bf9aJZvfcCf1bd/2bUgp4Rd/8Rfvvv+Lv/iL+NKXvvSpfzPPM+Z5fuf7Cu3pEiEie4vYlOzDDgaQkDiBPFK8i/L9exzOJqJpDQ9g/x65Tbe//v6+wOJvIttRtewMI8MhYiS4XxOG5gRIs0iqWZTeqjmHHtr623R7YqmUZ2yH77ujGg7Nf1XHz5gIiQkEdtPkd4gs60tugJjixez3ixIo2d+IG3Ly35vZMqDMngl0yxK3R/vfHD8UR4NI4/v2MEGJwTkjlYw8F6TJviYmaLNIX0Qh/nUYxKqKXRVbE4iIXbc/sa1lJCLUpGC2AEAO1zQ+/DnfZcrxRTgJvfuJqjt9UQ82xBfmyFdiDfV/0+FZ3S0pz47j2cl4ftqfe7gsXyf+gnZm5tIis1MFGtRjfDWn0c9JcdhKvvYPWZV/xJHUHF5SgMgMY38d+HkqkMidX79ODMfj5xoZ42FH3QV7erjFqvasW22o64667thvG9q2Q/YGEtvDmQiFCVBC8z0sAFgVHFfp6695pEfSwFWRRLCKWJBGBCoZJILFT4JUezZdm6D65yZjPcb1Mux8qDkiI2J7K8IKusuDe656/EbPZP2eD2dGfT1EKBpPPBGQgP5eb6+t2FuUGZyTBdMeVDIKkgiKKOZakfcdKg1lnlFOC9I8gYnR9mr2at8h2wbdN3
CryKoQBlAKRDIkZwu6BVAyZEdEgdYs8/wMx2+4k5qmCb//9/9+/NRP/RT+1J/6UwDMSP3UT/0UfvAHf/DX92JuSSIanBJ7FE3Y4JsTvrGJUXKGEkOIkRMjJXu8Y2Oq3VC1rF08TbFNJwaJ0SjwHY28ivRN3WMiXx0psiEie08iEKWe0odza7WhbjuIdkhtbly0O6jubA41RoZDOIi9Tu8Yvcj+yH+WOUFJ3Ij4r3j2lR0qjNcjAgozkioKgJnCKGK8XnZYw+Eqfisy6jBQa6geoXdDCL92UTQVcyaJwVNGXiZMjyeU02QZ1FSAlNBaM8fU7J6DGTwVsChoKtBaIbVih6Kq2r1UxS6COVsWVlKye8TcAxNRoEk4rfEISd1ZHA72+5PIfpcBMARQM4akAoYiA8ggMxqeeY7/vRVFuiFq4p/jPR0uYSJkhwoTAezejxyWhiqEBrwXkPcm6tdvmR+IkN2YFTYjxxFs+b2obox3zy5b9xZke4GADEaCWuBD1B16IkKcBoOQVKG+vlJi5OQBI7OfN9yQ2f4bgWGse89id8F62bBfV7x89Q1un7zg8itvsL2+oF1WpNZQgB7cVBZM1LAJcK1ixlMUqwqa2jq87ApUgu47hIa9KCnh6XTCRyr4AAqeMqbGKADatqPtFbfLDXWvWFeDEZvaelNHWZgViQUVhElh94ATkjtrpQgMaNiMWAphvCJYiH+DbV2xOSALKhkZFjAwFBMImYCJYb/XERh/bQB6cFTmrNwuuePK84xyOkHEIExOCcmhRRFB3Xa0bUe93dCuL5DbC3i7oWgDc8JcZggRhNgy6yaoFRAPE1qzgOOzHN8UuO8rX/kK/tyf+3P4A3/gD+AP/sE/iB/7sR/Dy8sL/sJf+Au/rtd5Gw5jsg2SidDc09T+e2HQDQYLY8rUfUkPkKOG0kQQIaClqAB8c5ttsT+IqDPgjohISewXLZonqDJU9M7JAIgX+/So8hDB3mEfh/M+uJpxLYiNcOfBumMBGMzjteP7n5Z6E9SjNhjEA1j67mlb3Ms49WN+0ZPR/lnvrumd+JmGkUlTAReL9JQs04IM/J4Lo2iBMrCoQkvCIoKbCjZpSGuGaLXsVBXVoaidBbWJbaJuIkYWQEr93OyeyOGmjntl+blnfh4+2yNXy+QB5G4o7uuaEXjYs6SebVZxx6qKKvFLBGb1LAqWCSsBfOghORj2+KeoObpdrEa0+jWDgAIgkwVpGQDIsgT7G+0fu4yv46kxzDkrCQCGkvn644kY3BXZGlv0TuyQmcFm3Xi6c9JqaII0AbcEkN1bcXhtv+1YLzdsLyvWNxesz1fslxtk3aF7A4llECCCuhFH8vyCGRvb9wvZ/dvhdUgAtWmHeolgkDMz5qlgnjLWbQMSW0C17WhbxbZt2KtgawadV4UFYTAbkhRISuAkQBOkJkhJoGAkh7sFgGDUowIVArTbtx4QgsBQR0Jsz6X4TBEoAZkMrkw8silbG6PWKAckIgKTyHSRDI7OZMEDoL1GDABUq5cTRm1zZMiBYiWAE4gTCAKClTZUDf4HGvZv1UwKAP7Mn/kz+KVf+iX87b/9t/ELv/AL+H2/7/fhJ3/yJ98hU/yaR4QYbjCyb+KAfAwys98jQYclDNayrIhp4L/xYUQMwd48pSKLBlM8pMQH4xrZQGQMRyKDG5XW3OABmhKgtrG70ere5giaoC9QjRXULZGYVegnQd1pjaxqOKiAFogJJAxiux/d6cXPjxCQitU33OpxnF9kZXCX+A2cW3dUOqAVHO5LZKh3BwFE4aAyyjIhTwWcs21kaZBd/D0VZS4GBZ4mYM5Itx0rUc+gnq+rZRC1urFWbFVQSLDnhsqxuQ9BgnuPyM4HucLWQmxGMxDmgLrz8usVttoBPKItfDDIoDtHEq/dYM5haw17OKtYvoBlH35+me1Z2iI6LAMNY2efqxuEW7O1fGmWnYMIsxJKcrCJqWeL6hlU9Trv5k6zHlJLVjOUzS88M5BE+14iX4iJLUtIxEByOLkkpJJ7VE4gd1AC2RtaFRALJEn3wHWvlrG83HB9fcH65oKXr73B9uaC9fUL5LZB9+YZrBvNxMjkkJvDdLsb2s2vt4lBwlUVN2mo0tC8pley1ZHnqWAqGdd5gjCjSUNbPZNaK2pTrE2xu3OPrLWJILPdJzAbdFgNvQnoTjtZZ9RACSNYPmyLEYQzoYCNMhOBNtAzYyZggq3rgN+Zx0aTgCpr81q93/NktdUegLHVhO/39EjpuNeNGZzYSFopQTl5tp6hlKDstX4IlDY0Fey+Dm91x2c5vmnEiR/8wR/89cN73+hwHCwBUFLMbPeSCaADwyiMNfsDLBwwjNdFPC1d647WBHttfcPnZtHfognq6XZm6g4porAqke4r0My5NapAEssCkoDImVEUaIfXDrweZdGk4bjixiEivP4RzrkvwGGtiCns153j68V0shBcD7vhLn/zHRO1lYE9jMj9zr+4c+9GWnvl45CjoNew2KO8IDWEAWe2hZ9LQnIIUUSAWiE3BTsUMZ+MPDEtxSAjJpzWDbd1R3qYkeaMNCVctw2cGK21jt8FJFMlIufw92ZgUyJ3yPaMmMgctihkG94lKUDufJPaRmpQqJLV2dhgxdj0AkIF4L7LI350GDEYcVsTcypiWUw8ndwE2aHJiQmc2LIpOpIf7H5HdB5O5lobtiZ4qdX5gHYfZjH4ipWNDerPrjk0uotidSi2Ruqnbix7/Ga5aPK9cISQAXZjliwrTkYiopRAOYEdE2xbQ73t2K8b9ssKqYLWxj7arjfs1w2XT15w/ZU35qT+19fRriv25ytQG9DE7ocqSATsAVb2PUBMaDmhEACdcK0VTIwqmzkKJwmJiGUMYkxbNAFaQ1s3NCJzZJsxZbcq2AVYhdxJAWuvTVl2ldjubWw1UkXxbA5Rt4zgre8agFSG4/I9UpgN2sWhlMFmF7JnU4kIhQJV4kGiIgsG2t6wXVZwKXj55NIRhUkVOSfkuZhN4UGIicNiD0cQlgnTwxmybZg//BCtKfDVN6j1Bdt+w159JbKxF0UUl31HbRVrrRAR3NqGz3J8Lth9n+lww2gQDPULC8REAKhb2ETUC/3mKyITOhZEDY+PQqWoRytMSGJEgwguA9Iyzhd69iMilqA7BBBsLis2e1bmAXG8vzodNTBESwRHWBW2Ij76j9j+Q0dPdqAIHz0cObwU9YA7gE7Hvw44psMQYTLvM0CFG+y3Mib72cEJRgZCAbAdir/xEdRz3yHmpOy9iTMAi8bzXDA9LGasE4OXjLTuZuTXDeu+Y1km1NqQUoJCQAHF6mDw6eFCqDv2wcxiZiNESIM2BqtBU0wMQAz69VdgAEJmrEjR2XTwdUF+r5Pjg6yETmUIByGRRYk7qTBc9n5NHDwhQCloALamxhPSvh6ttjUccxBTahMkAC2JvZbyKNDfwX527rGuI0siAI3Qz1kczuxrNSJyslaIXLI5rJK9LeNQj2rG2KtrxX7bnQSjfQ+tzzds1xXXr7/g8skztucrtpcr5LZBtt0CNgkEwNetOHtP1
W0CMDGDVFFT6lBoYgaL9EyS1X43gfwDIC/0K5E5syaDuKNAgzHVqr9mlAvAgIrZEqaGTISNYPUg9hreYW8f9/fbe9+cji3OqJ0JACZbIdkdVPIAMPZVsJOhFnRKbdi3irRuWK8bkBk8G+UfgLMqU4fuAiHpgSYUUEM7eC7Iy4x0PoEvN2BaIHlDpR27GBKjbbTdbHXH3hrWutlz/VbPpP63HAdXH1+l+JoJDYyUBrspClCJRz0qkW1py5wqtr1i3SuqCDYvuIvCnFpif1i2yJFHcSk2cmwu+IInsdc2I0++0aPOESbFP5x6LrUNNg8Go4fcsQRdmf3viNDxd6QEzrnTg7vhkTZKWsChjjK+B8efI+zTToPH3bnG/e6OSNUMu8MBzNbBM2pttmEym6FVJZCak2cCstPOS05WCyCr3bVtR6sVlAhpLkiFUaaM5WnBdJ5x/uBsEXlO1ge37eApWzCSGK8/eQGBsF7Xzkbiw8YL93KEKhPbeaepOOSRep1EI6sVsZ4UYYDEgwwN5B27AhXhIOxnmwqyKDJzz8QB7kGSrTPpPUrhrI4ZrK2f6KbrAMLdDrijzJAhC+jQoGWFtgCcUSpBPaR+vhXArsaQrHAKu//OqHECgCB5FNggPWPpoRETuGTkqWBaFmOSFesZtCzAVlFdK27PN7Sm2KuAM4NKQmsVrTZcXj9jv9xw/eprrK9fUC8r2vMVVAVUW2e3aQ+6zDmo3yT2jGpJ1oPHKSG1Bq4Nq8IzKu1tIVPJmHLGq2nCY0o4g5CrQfZoEUgOdCKC02Y/Nh8JXwCk2KsxD1MTpMYQJpD36XHfI3QXdVKUKg5PtzBZnQva3ztiz0SOCLh96/11nklBbT/v647rm4u1nywF87phaw1NFNNpQsrJUA0nUtxDJujFR54K8vkEEaB8+BFyJeDxNdqtYb9WrNer1702VKmo0nDZbthbw82d1K39ZnFSZBCIAeojqk0Of01BR3cjHr0hPROBeuHWo2VP+cV7YySYVqIgMofTmNHIotp4iM1hFv1G5+rHCJwOWUfPyLTDUnr/W+ZsiDzr89+hMEpkjLhiDqrMkxU6iXvkJ80rCCRuaIYzCidkDklGHemYSWFEeUoW4Wv8HVGvZ9j3ByMRCBtocASYoE5rUjVDld1BZU5Wv/AaRdMKJICEkaZsUKDXqsoyIc3FsqjEUAYKAdMyYV4mLCf7PN82TCUb60rNSSQmD1TQI8b7NeX39JCRBrQ2anAYdUV30gTt2ZLCgosaGRtZ8V5EOkM0U7A+77PjSIITxT2nDi8nJ/2MwrURgSjO/QAcscNvOQGChikpxGG54rBQUvUs2OClIJhUVc8M/FriPeHXAm8fQFCjPWhSBbx3ihyWTDlZLSrb+owWA86WXako6rZ7/bDZGiGg1QppFbc3L6jXFesnz6iXG9ptA7Yd1AQs2h1nj/oVd2s7iD/JA5KJgcaExoylVoDICA/CEFXr9csZ55Qwk0OiDv/d1bYdpiZ1dKQ/gkNtzk/IsnFAG6BqNkmOD15j1WjPBruzD8eFAEdGdjNqpOjOmo/r9Gh7PAhuewVtRuFPU0bbrRFZox/w+DcH1OSYJdubGmxLZQLlApQC4gQ4c9nsmTl1a7NpUKlQMbhPpeKzHJ9vJ8VsH9LeijrUHREhRVMpGyxEYehh0ZYxbGIRiUVTXoMY3fZAU+u3qY1Qk4CFkESHk9LAiUf20I+3PFffQ4iaz1gA3THE72E4KmjAP2LZhsM7Vij2gvRcMJ2XDlVV7yGprZqxaTReuG+ig2MKjNybEu+YeNTbRofztNXrgbWC+Aij2TNwfpVlKQJoYlAw9NgaPKeSUTIjJQJELKuoACXq3fApMcpcUE4TysmcFCfLBLJTLqfThPk043SasSwT1rlgmjLYodtChu1nrxX0CNYNTmQMw3s4PHqITsf3bZ2ZQwp7M7KtqOV0theCfWp3ptCRATdYc8kzsqjvMLOfszsqb5841hsGM3RkXglAFsWU7dmJWhO5qmKCojCQHBAXMZWWKsB2qIk1f8aKqB2O5SNOaY9WCQFBlJA8iOlOqmT/MBpzWFVTlbCsua47ZN3R9NKJB22vkLpjf7lAtg37yxW67cDezEGpr0m/dquLxjnqWJdwEIVG/59qgibBQ5Ne+xSHqBZXoXhIGQuxERGiRqUwpwTqjoF5OC9rd1EPHPz9fcOrNs/ACNIAYh7n7s4+gtbjmojXiY98rEPfLVe6d1DsyI/fG6uTC3ivwMao64Y8Z8jeBlLQUZNDkHa0A+TBqSvpUMpALkA4qpQBSv4yVoeHNPtoFZAKyO4O8TdFJhXgq2dTLvMD3D84Yho9Gf58o6Pe2QlAawYNwG8KecpO6MXt6AMx9hMBbdAubYGN7IYxai1HmA3juVtEBc9A+kL+RrlYRFOKAPoUCmUCMhkb7nHBdFpw/uDRJVkI9bahrjvAwL7uYAbq7s2wrfa+sLgP6vdhMAoPBtvPX/3cPy1zJMc6OqGDRgSYQKBkWHx25kAuJhdTSnbZGHNScV8SWQN2Tgl5sqbeXHInVlBkOgCIFSknlJJQpox5KljmgnPJqM2g23NizClhmjJKNnUNTgf4LK5btTdYR//ckflkjboMRevXCIllqL3m0TMRkNU2CGikHWaGQzYMN1q+QCZhTHAnz4wpWaP1ktNwVl4HOpC3EGF5OCkwGcxFhEwKEYJKQ1bx2ovDXCBcxRiQl9pw85pYO1yzMeWGE0g6aDHia14By+pzsoz3NGN+WDA/LMboS4PpR2xw9F4b1nXDuu64XlfU2rDvO7RWaGugbQNaA+0VJGLQWWzy2OMECyD8QfbM9xBWgTAaxRVIZBm8QLHX0huw58QonJCj7aRFc/fITIiMPCL+Ocf7kAVh6jAjQVEcbjSSlrgKCJkRd8feIUKYEwu0h+8+tGdL8aSPj578xDjs4uGIIJdggV/KjHkpWE4TTucZy2nCNE/Ougw5tTsqh72OeBDqfKpD+Xx8DXX1C2+eJitBVr/v1c/os+VRn3cnBQxDivEpsGmKSLVHmujGD5/yEYsqoJZGPanBWPNudERQD1I2d5kDeUR3cEj9HxH54S147ACxfaqjovEOxzSIGAZ5ldSzjPlh6ZE2ezS7b7vbXrFcjBsaBALbHRKUc2kHx+MsRMfFqFtjDIRV75Oycb7H54N+P1jv70VxGaZSbHOYj3L3FzCHBxqRHVJQsN/apeMtXSssWQZSAhLMjIkTppzMQZXc75NtvCBUeDR7B3mO1+5sKXLlkv7jA2HkYMAPZrL/1vERR1QOizcAMFo4Ls/eijupnLjTiuOe3t/2YVJIR98MEaA8YnIWy26hw6FuItia4tYaVqehH/sKI13sqg0DDBoPIOot2WpR5qgMnuWU3DlRr200UTTZsW0Vt9uGy8sVda/Yt61LhqVaDbpsggSHVPnooA57/HCv3/7X3bM5PNaQn+oZ8mGCUYf8aTj/ILPE3QyGsPXMBU7jjkbt+10Fws+rIyfwuiWijntcFcdVM3Y+PBA+Oqv+zN/6Xg+C/V6B0CHYMhWzGXNBLsXk
qDyAGBnUIOOMx649cNOmzkSWw5o34xC3NPn5ZgIakffYeRD1GY7PtZMaGSgPixkGLpKAcFJhGFQdG9WOmUKkQy0TWUTbYE6rkkEgvSQBM+hVuTN+4M6KyFiFITj0qzXI+on74vx0J+VJQmf/+VUDoH4+nBPyVDA/nnD+8BHLqzMeP37lmQZhv6yo6wZOhP22YbpMqE4k2G8r2rahomGv1Rpfpdr95IQ0sRmcMqjekXk1V9iguI9H2YsDBBMOBn5/Y/FzPohv5oScO+UFNfBxFqPF5oTEGSG0etgrY9M02zQQ1y9TU8eoifGYE2SagJRwyhmlJJyXGdNUkFx5QkRRd+sfueuEjz0HIHECWKFeR7EoGB0iPYSdiAycfD1Y2+vIKqNgzh5IBTMrA5BMB6Nl9zA5JThF1tDvlp9kN3rDAZJHzlnE+vwOFxNraldr8l1F8boKNhG8VFMGaaogpzFPqo5rsfsHNbSdvL7n8B3nhPKwYFomnD/+AMvjGaenB5R5NohaRxPpVq2h8+W64fn5iufnC57fvKDuO/Z1t0hcFRNZJjJ5htA1Az0b4zTYoABsDdBwBAAZnA9nCypwa4qrKC7bhq0Kbo4ghA1pYHBtxjRkBWByWoWprz2C1fSy3/f21hpPMFp/dkeVoT1ggNrviwIV2l0ekdWStWf3hgBAZWSqGLaor9HD3ruPjodvARkhYjrNWB7PePzoCecPH/D40RPmp7P1JAb70iP62Na+IJ1so5CqnZFZV0NrdK9D+JgEIFMmYWuYg+QMgqA2y8q7osqvcXyundRdpE1Bf9VhvMYvwm669pT+SPO2fXuAEDgCNaudMGkXyYyelJHa2mbwtkQnCuOQ4R0WS5xvj5j08Huj/nX/7EZVZxgY7RlSmQqmZcLycMLyeMLyeMbyuPSIiJmQJjOqZTH16TpntK1iz8B+FexCoLWhmZ43wAVUCOU8I00T8mmx3hZmqxWJolYZXevVa1oSaf6RVn7IfHg4reR6YaUUVyBI/jjUZGr8WSV/X8A3RxNTIPD3J7H722ozJ7NWyFahe0MSRQHhVIrVlCTjtEwoJeN8Xvp7q4g1NK479LaDyHrVRhZ4eBp3RiAcczzCKGJrrz2Fswn415wRO1U46krstbXI0Ox1pTe7+Rq/X00I5qUimGAHUo16XqcAxLJn7jjN0ONrothEsHoGtTXpbC+BGp05HLGfjrVwwCDE5LWyklGmjDIXnJ7OmM8LHj568vV4tlqUwtTCNxNWXW871nXD5XLDy3XF9bbhuu5o1RtMHdkAGXEjVDeisTzqLkFrI79wPTwwQkR45KQQc8q3JrhUwXWr2J3JC3j2yXaftnhusKCMmCCa7rLVDqsiIG1gOClnr4KQlFxCCv5s7dmY62SIOzcCDZoeAaoJJO3uydvyo56RKR0XYVSAw+a4nfG9xykhlYIyT5hOM8oyI882aYByAlyFpLM49fA6vnxE1Bqvt4q6VbStQasMpm5JkLlAE4Fbs541UcxoIFI0qRb462fLpT7XTko1xj04myiZyniM3PDfQncCnRSg3WEhID7gLvtiBQgMJlNFr77fwaMW0+sNEuyaaCiOTALvRDT9dGJh0RDD7an5wZl1vP2w8HqdLSfkafLI6ITl6YTTq5NJ7yczfikz8lZAILRtQp0K6lzQth0bNeyo2HaCckOlClADpQwujOVhQT4tmF49GYsncU/t972aGnT1AnczxQBj8uioyXmkSx7FERuZJZXk/TPFYbzUJVssY7NAghLdw3HxnlXAdTiSuhlrqd52tLVC9trFRk9TBqlRfk/nBWUqOD2ckKeMlBLabr0j1gdjWHn0wPX97/f9aAy6szrIKA1nBAjTYL3B1S2IXLKGu5PiyAhCeqYHXQHVyEAcwzG5swlGJuJnhzUU6ymYl3S0OL4PajOpqFsT3Gozh1UH5Gva3gC4IdjsmRjZIUgb68IO7RXMpwWnV49YHk94/MIrzOcT5oeTrfOmUL1h35s1zd82XK8rnp+vuFxuuFxX3HzkRWti8HsYdfb+JVakMKKe8XdJnjg85TgmAXB1810Ua2241Ybr3nDbmzUuN+lZL8MMP/uYlXBSkQl2NMBfPyOc96GOTXSAAS1zSBEoa5AkrB1DaSy2QCCGzpQaRi7DMXa7oopx4W9nUJ3CM4JedjWXuZgu5nlGOc0o89SZlmNN4/6ItRgQXzUn1dYdslvtkMl0PKe5QPYJmhlUqzEjVex+kgKyozYF9LO5n8+3k3LMmlxCv9dQ4Pc4HFXfmzpolm9taCCgGMuWVENahHtTY/RDVAzxTlHHq92IWC0hemaoGyiDaYdRIdiCoOQF9bgejFPus3+8CRHei8IpmSrxMuH84SNOHzzg6ds+wMPHr7A8nbC8OneYMy8Z0gTlVCz6WXe0y4q2btiKYssN1/oMXIBdBYkE6cwoDwsevvQhpqdHnL/wMaiYjl7zbKZuFW23nq66btbJfl3RtgrZG9BcJT2Ry6YYfdx6j7jXJ5IXaUHUWUa0be6IaocX6rpju9zAOSHNE8reIC5up4ARRLaK25sLtjdX1JcVSYA5J5SnsxE0Ssbp8WTw6HmxcyDCdtvA182gF++NO7IuDUZ0hXon3FhPUzR7j4yeyXTxmKynJevY7wHrzTmhsM35MSjTPkfdLcxM4PytoWdAoaUWbFT4+x/V2gF0fcNOyoHe7QNxOM/0DG222F5b1w2MvZDjAwaFFwaWbM5pLhnL7Jnp4wnzecHp1Rmvvv1DLE8nPH77RyjzhDwXU5VYK/TlhlpNnPXNJ8+4XFe8fn3B9bpiXXdsVWJyjWWmjqup2Hs3JRulc8wWPMiL3x2BoDsF2zkGbYqJzV6r4tacwSiHWqLdIHueClN4EL//ggHtH96GYYy7t1GQYP8lRBas/T2i95Fha6mHMuq1BfG9j3tm39EX9/MlP4uA2u9+TrH4vHadURZzTtmzKMoZFB3An3YcLqxPa9htFp62hpwY8zLh8aMPMJ0y9vUR2+XFiC/bDq4VVCvk5Rltu2F7IbS64WW/ffr7vXV8rp1URISR/hrLy6Jv9YfcN6j//jFbuaNX99DrLYjHF/9QEIjRHjgkaXogQ6CrG/eNcgcg3x+RFana+RtDbGw28ogxGnmJCCmK0rNFQ/N5wfQwo5wm5GVCmrITC6gPByQAUhpSSWhMkMLAdYKuBW3O2CaGNoYwrJh6nrA8nTC/OuP04SN4KqCce7ZTt2rq4rVhv25oe0UqGfUWkZU5EE4G7Rk0mbvDioyBozEVZEyy1pDEoD8I9/ph2w1aqOuOejM5lU6xVWMu1s0zqc02EJMV8SmRFYengtkzqLLMvVbIe7P2BGcvUFigXqM4jBc5rp0DXOxLBfDg5vj41T+HknkJAkRKJgV1IIT0uWV6FL6N87jvYRtfv40aDOekBwgwrum4B94Zn9Id1H1W2GtmNPrapqlgXiZMnpnODwtOT2csr86YH06YzkunnmtTX+eu9u/N19u62bOrrcvnRAN91NSs1qNd3qw3uVPU+u7hqaOt7dcK/1s1TcQIOsceP7QCxLX
Hv4HRG6VjooE9Y9vbYQ5GBvr2h2do/vMoC8T7xPOgDskO4I6c7NSdMA6KMfFu9NbH4VxiIRJ7jTlbuwq7cxrr/lOMVLeRh2gr9gYM0s9TBmSGPj0gzwltn7GfJog7Kdo20F6hDLQbY5cdbTeC2Wc5PtdOSp0kQQRwMvjLentjjIN9UOC/hyzqHUPjR39MPGRRIhJrgBEF/OMtd3Y4Oj7kjgU9looXU8ZwJK46bMXdNn7P/64jh56V5Gky3P/hhMcvfIDTBw84f/yE5dXZ6khzGew3HxCY52L9R62hnSfIbQPXG1hW6PWE/bqAs6k2T6+eMH/0IV598SPMr17h/B0fm5MqGZ3t5MrUWgX7bUNdq6tSr9abVZuduzOJOBstnLzI3mErpzZJE9Rs0FtQuFGj6e8gFtwM3s3LhHqr/T7vLvq5vaxotx26C0rOoKkgT8mgjXnC9DB3KSWN+lZkFt70HIMxo/AYAyh7w3f8+27d0PGR95lhUzcg6PBeycmZelaLC7HO0XypI6L3PpMYiAkM9uEQAB7ZHLpzwqhZHda4+L+7srn/iES7GGv0cyVmTGxOdWLCnG2A3jJPmJeC83nB49MD5tOMp49fYX449bVYzjPmx3Nn4eFWrVl3r9jWDbfLFddnz6BuBgHaXKahHxh5AJNPP2bCDjPqu98p9nsOij4uQkgixb2AO7fWHRQhZhqzt68ExTsB3uhsTb8TWwaXfQxGBMUBxg2R6HvHEPO3unOLbNs/YtaWOSM1qSMZ88RZW18PXSQX/U9+VQd1l0lFpOQOKiTF8jx5K0eo0xz+xs/700xbvFxKjDwlQCacP3yCtjMePnr0Zt2Guu/QVqHrDlxvwLqh/covoz2/wf7VjHa74XS9vPsGn3J8vp0UDs6dopFtFFL7Ro0a1CF6xPHDj97Yaf9CNG7eP3R1dQnrdh+/HVHY0KA7StiPKOWtpx/RvkeZplQu/byCdEApBpQxyjIbrPdg0N78dEI5mXNirx3Fe6qPYDBaPoMaW1ZIQHs4oW0PKNdXmPYNvK0oIJSnV5g+/ADTq0eDys4zyLXXBD70LvTLmpgjKuYwmAlpTQ7FodPjTTjWnRTQmXihVUjMPjYAPdsKSReFOQhpVv+q182dmzt5ADUaEl1SKorE5MoGxKnXnEwPsKF5HWt9uWG7rNivqw/Qi1HgoSCBdzKm49PsMYn/l3v6hL6ridhZcJZFxWdKQ/mirwqPprsTEm9KCWjxcC7HzdCJEn5/VSOLGM4qguBj1mS1HnNGrD7Q0Ek3s0tVLSVhngvmknF+XLCcFzw8nfH04RPm04zHj15ZVv94xvSwWKCUjdWlYkotvY6xVZOpqvYR10cyssU+YZiMtsygMdSSXKy3Z422xXPP/oZhOLhvRJYcTa+Zg0FnniS5k1o8Y5yZzElTzNwKRXWMgKLvbepZcyQ5AbkeTqYvoD4lx4kplhlGBKsgoTtniAPSc7R5d9R5uluI/Zojizo2VueSehYVmdThL2JB3R2BKnJipCkBOtl+z9436I3iiLEcrUG3Cr3doLcNLRPqecFGinq9Yn+Z8FmOz7WTiuM4MykeWuDUAcvpnaPCWw5KPyXVpd4v0EcQqDq/X9xYy916iA1yFEwdFPh4p2NkS33DqGdtIgxq0edP3cinzN0JTacJi2dSy5MZhXKaPIVPQ1SSHGJQNaUSUcOeY1OeF+T9AeV2w9R2pG2DckJ5fMT0wQeYXz2gPJyRlqlP7wxoIgXTrpmqe8jbQN3hVGeDsZ2/ObI0cHV3Tlrd2UGsl8kdVoe+6AC/OTmj3nZzUhjXKS2Yf0PxPBQPbOqoxaLVxdW0iWVfmzmp/bbZlNfdmGfa5LA+YkUMR0QUUWpEnSOvZvc4EXX2Zli/pqBsp3RP0cfRmUSGpHL4GMaO7syf/73//G20QI/f81/znKxH9ZnY5IJgP7P+MTIdu5QwzxnLMmGeCk6PJ5weTnj48AmPH78yJt+Hj1aEP83I56k/cyNLBMGm9fsr3qiL1nqDLulwxgFLxmWJWmNxI9cWjDvg9z0UPw5343AMxp/R+QkZpu3Zu5qUe8PtTKYqbg7KFcYR8N9QtTh+9GAE4RjJnU481iCBkDu5+zVlTqrn0LaPjihKPGyKpwZ/r2PH1HGxHr7HVvvmPJwUl+yB4NsREt5a94cVZosbgKvZE5AyQ+Y87Km/XBQWda/Q2wpZN1RS7PMEbhX75YK5/CYgTsSN7QMKgx0GANHF3aPS+4wK+mmL2Y9gDKUw+FY5MWUGhVADEUOpddgkmHj38jWpqxLHQtWeSI08kEuQCPz9ddTZ0pSRl2J1lNMMLgl5MemjskxYPnwwB7WUbhiOuHRfNaredKWWFREhPZ6t7yMl8NOjGfiUkE8LysMZ0wevbFz0lHuflBWTCQIBqRf5ecAM0e9xXOcS8AuN66IIHvrfRK3BoT4yB20wpW2CrkreBIKKXTGclEOwTVyr0GtZIIC2Ck6rOwNbA1JrJ3nsl9WZgUb8MEIOhpfByLKtV0sR9b6eSumQ4uK4RlBvJuc02HyJ2ZqwAXSh4WiaDCclgxhxDxm8vfy7yJff65gQLR01iGyqgwd+z+PRTYm6CkFMU80lI+eE02nGPGWczzMeHhbMy4xXHz9heTQn9fDhk+klPp1MraC4nhvB6crmnKprxbUYUNisd6tQ9CZqzy5CUVzi3kcbQk7QUqA5Q0tGYwtEm1qWI2TTowvBZ0v5nfHgtXiw8jAxJhHMImjKnjFrV+AIZ5UD+vTn39sADg6v16aOHtJTKuYh8EwIB3fvTN3v2Pqn8aiP+M3IkDi+0ffbaB32VyPq+xFskC2YUU4zpvOC5WHBfJowLcUZtiNXe/c4noN/kaKdxBjAQSoazuzg8BwF0K1C9x17SUgfPILPC8rliv3N61/lvcfx+XZSOESIcj9l8riaevZy+BvCfU0BGIaop89BCfZ0mNV6ppKntE0NHuGA5jCm1PbJvz2dNkNEESH3N4VlGSlZx3cENmzvn2cf/rcUlPPs7LbcBVbzXIwocYDHjlGUhoMi6pu2Oy5mIGXQPCGp2vj1ko31c1pMCTzn0bcRr3mM+D2jas1o4UZLt7lYAGzOj0ds3KRnDkFGiVqLNOn9MUHOUMCz2QGxgKyhuEFAWhE7IoxxiOmGFhuA0Z9F6AGL1Abdm9Xott3ZSi6R9RbMcVggRheOexzTicNM9OXj/WE9mx50/A7fwg2RpzbhNI5Z/qcRNMZTGEYKqr3W1P+nQ1X9zknZohimjaI+YsGYyWxllMkCo4fHE+Z5wsPDYgy+04THj58wn084f/CI5emEMpv8EfEYQ25bwjJAU/Y3RXOpITjq4zDIAjoRyzyS38SeIRC84M+uFWdOEDkD5OQHl65qGHJQhLH/4zVSsuc3YTQhN6HupBja97N9HTWvkTnd/3esi7iXx6Dm7vkBsPHuh43vK4ftkYw2FNxZiH4tx7PA8afx7QikAklJVh6gZBJV02JtAnnKPteL+96wvx+Z3/0ZHt+K0FNBtQGqkPtz6k411jERNDHyw8meWW2gecaU3r
6WTz8+106q31NppjEbTkkjoiWIF0Ztcx7YUW8vAmAYH47+nuTwmS0tUQXXZqoSYpvahESHQgEzXGXbqNaB7dt5jd4WASwqSWTFzCnj9LD4eO14bzZJmbMz986TpeiuKs05dcdlcByNQnVcz/0dG8ZP1BmQDKSCdDbhzzRPSFOxzGyaPDNDv684OKa61z5Erd52m/vzcrOZQHvtb9mhkMgkUuqyi1D0+lbAbdu6dcHLcLxxJWJyFzZq3A2zBXNey+jkh9YJD75I4gUCP7KamH+tnokPRz6OTnem7loQjt6EfrUboFCHiJlYPQJ36JGZ7usMMnqg4n17sHVQ4/80x0k4ftthtXDC/r/WHdX99cRnAvtstYSlmOJAXmYsDydMpxlPHz9hOc04vzrj9GTfO3/44CSUBWWZrTE7BxcOvd7YxFQJ9nXHet2wXVdTJqjWA5WZoMxoecDAmgxyDMae+J5MrnyS5hl5sonMqgqtDVUAkuaSWwbPBW8IESQIMLEFmEmS62+O+xvPvwuiHgz1u66hp1J3NgOH5/3Wbx5XzSEz8pN09EM9vb53VFGKiE2IvjfksHYA+Jwqr1073TzPxvY9fWhN1Q8fPGB5PPlYDgbHQDQojlf5jvs4pn++9nuQFSyhT/sTBSgl6CS2588n0OkE2Xdsr5/e+ZtPOz7XTkqhowGUbEIqedQU2Bl7VE0Uoo4D7wd8Y9DbS5C63FH/AIwBxREVWUShyia06Ys9sqeuGehNt2YS2IxSvLc7wez6WfPDYiKqXlvinMw5nSyTMid1gCETg6eMt/W2DjcIPSYTDAez7pBtx/7mxSRNbjdr9EsJnDI0W3SkUbygEYmH0kNdTbqmrjtur6/Yr5t9vqyjwe+w00MdIIbgkddjFBhOb92Mnnzbu/PoTsozwmBWvp09H41NGByJulJnwQGd3tuzkshyI+ixzSe+GWOSra21EWC4+cAxgwqndBzc2GuScQ1vSXhFran3Nfn9sNOOmU/S1+Xb6x9QjL8cj33UoD4tk/Jz9+wycepyOXmZcXp6wOmDB8wPJ7z69g9MVf6VUczz7D1mk7VBRA0UGM3H4tOl9+uG7bphe1mxOTml+tqwhm/q98mCwgO0xqmrx1ss5Vm0r5mmcHhXe3BRAC8K88gm4EobhP57pGKBJIZyjI0cub9Px7s9/j3qoNTfZ9R2UtRRmfrvd3TEM5GwUZYhEsiliCinoYp+pGf7Woy1MGp7prJiW0U9CHL2XjHbYWWBgtOHDy6d9oD5PKP4mJsOY9/Zv0MAf+ec3joOQ+k+HXwYWR4pA9OE5LZKasP0DSGL++Nz7aSAw2aMtJq4jz8G4NL57Nj/IWKN+/NpNz9S655CU//dMDYpkdPdA2aiDiN2mOcQZRHxwSF61J0TuDDKnFGWYkZiLshz7iSIskzIJ8ts8lJsUR9UCSinvuAPF3B/jX6fgk0ne0VbN+zPF9Tbinq9WZ2qFKT5BC5hOGEGtTsF9P6o9brauO/rhsvrizupC+p1Q9us7tCDAc+EOCI9jqyPDeboDcK7Fdi3vUNbY7wK9WuQbrwPjuOo3n6nKCIdMyfPBO0JvDXWAMMQjUxjNF2GwQyDMJaKX1sYn4Oj6koohL4WBoN0EBokiszdjw4n3LO9d1zUW/vAz/sQf/W1HtqQGkFLD70pgmKkxFhcheDx1YNJGj2d8erbPsR0NqJOOZU+DiZQhp79tZFhyy7e5O0j4a/GnKy3FbLvI0sm6o6KPCDpzyRlq/XquP5xPSZMq4daNIfsC8L4H6c7K9j7Pmx/OvEJ3pRPThhRp79jNEe/k0HdOaghltuDKW8Q70EqjkMI0Z1nwHCcE9JUHB3Jwbwa0k5hg+AZVLVgbl8r6l5Bt72jOczej7hMyFO2+tOTZU2nDx8xPSwGz/rPewvMvdV4x2n1f9PYFx2d8q/ha3n8iUPKYT7JnDGiP7Ap8m+GeVJjtLt2g5YyI6UMSTyiajKojZpjv3fW2z/TuMkx1tkYoVaHsl+1z9FwmQ7NpDHV06LetyCAiOqYkBzsocRIk/UrnD986Aspz4EZOyMu/j1l61XicHoYDvDurvhC6tbDTlCq0bPbumP75Bn1+YKX//ELqJcr9pcXpGVBWmZAANkfoUrgppaqEzmFWLA6tHf75AW3lyvWy4rrJy+mCPFycwp3sxGl/RRp1GncURn70iPYA0VZxc7Tc5R7FXu/MBH1ey7DSbkRjuJ01BegCpaDA8Oh/sOm+sHJNcQ6rDIatiuAGhNz2yDgsA76tmIY+143iydxXAN8wDgVlkkdg6D4fnwZGdBh9d1F98cMKaLaMCQ2k2JcFgIOtM8qzaHJBOaCkgnnxxNOT4/48Ns/wsO3Wf/d47e/Qj6Z8wqCD7lg6KgBimkm7k7rX20NrM9XrC83rG8uWL/+Yo7qukK2vavFdETDjXiogGhOff/K3vqaJhBqbUjkfYW1IWu0HGRnTY6+MxXLhVVhGngiYLFJuVABeybrQ1dc6IH6/e13/DBHLJwSQkYo8XBULpwcDetE5C0HzuwMKG7OPrXY4FUuyYhRDp1SZ34OzCcUXvbLDdvFstTr8xWtmkA0e8/d6dFIVecPrLF6Os1YPjgj++iUvMxWLoga9lvr6m0H9e43w7bE86m9zYA864sMm104eogLeG1RFbT9JqCgh03plOVseKwtBFNaSLVBVcCtoTGhs6j8UADHzNq+OaLcu6ZNRTcCPWU/huA0Ot8Jb8E8fHBWRC5P4vN2vOY0nQw/tizKFr3h/WlAfO6k7BzuV1DALXGuvf4mBsHIXiHrjnpdsV+u2J9fsL+8YHt+Rt53pL0iP11tcFmZTM2dm0WutaHtDbc3Rte+fv0Zt5crtsvNxlFvFftldf2+ZkPgYNlTQKAarEmmrhpQ9b6RFkFH9nuYDj1fYcPjOptnUqLSpW1CKJiJzHjhWGcYz1IBUDKVb/V7GfON4M8+HFWLCFuGk0qIBlC7/VG36hGnP/MgSwTMEUQLlci87QXURWDVzzki6QAI4qzvbMVhnR4zsbE5AAI7e+ygSqFx9/0eM5CyjU0pPvl48unHeZm6OjZnN/wUAsAuNFrFlT4q2lq7k9ov5pTabeuiv2gh9xCsxoPsT0T1ce794sX3rdcb/Z6Tqq21Qzbb64DxkDue571YDv0G4SR+TYERMKh2WPHOSvdgifo4IIsN7jMJPawBZoYG+SoZQxEpucxYBk0ZPFuGmp29axRxt2lpnEdzFIOzaRaCGbUJqFbonnrTvNUKJ0wPJ29PmV0s2rLg1J0g7rKmdxwVfdo/fBWJN7nXZoFHM/sQwXOarW0FHniE+PLd871Df77x8fl2Uh4xJddlm5ZikkEl+7p0cUWyr6nWLi+kh6i1Q2MRNAFjYRMdvn90UNSVqwEYHVvkrr8mBRxAB6FV79sp5xnT2YRhzx8/opymrl5uD5dG3SkouAFuv5WPdwUCrzvF96BeQxIx0dVtR7tccfv6M7ZPXuPyy1/F/vyM7fkN0umEfDoB8wmlKiYw6LZDmVFFDV5Yd1w9Ir58/Y05qeuK2+XmTmz0F
5mjMI23EFNNSbrD3l1ZYHVVidakaxcGQyv5Bu/MPCKf/mrXV9UEUqsXwSOGyGSMsQXW02YqERa9i8/LEhikRJwwlcmiXORRV1GDlPqEWn+vuNEZQPBUjKRjjyWEhUMdJLmIbUSuIJuCK83gKalsDEiHr1zXxNYSFJ0CcYDxgCG8GoYx3E53lpQQCncmmOzOsVkrunUJGbOvZMVcGMtpwnKeMZ3mLj4ahfdOICJ02LjtFfvV1UbeXM1B3Yxm3naL+Pfbhv1izZy6VXNSQVKRYNXFRGK7fwJ0CBQioBhmquZqm2fpoRJhc8NiEKQTU8T/471YqDYwMepScXBkvV4SaMdghfwO+p6z5n4e31fxYMMcG9hnfSVbu5m018OUTU6KUgIy+9h1I4NgKsCUQT5tuiwTylw86Ob+rK0W1ZBfZtB0BU0FVQHeK3ivRngqybOnCcurByxPJ+RTwfSwuBxS6k3uHYrrVvBgVH7VDMqf/7qj3Tasn7xG23bU661nS9PTI9IyoejZ1lAm4I7NR9/gxd89PtdOanpYsJzOmDoGezKpj3BSzRtKk0WSrVWLfJxNFs2NPUINpM4tAnkEDdG7SbMjSvVvsdUCiJIxbHwPRLMcZ0//U7INX1KPcuanE+ank8F8p6lj6WMfkUNNbYwoSCPiHAQCQYuZSpEBimVAsaBk2yHXG7bnG/aXG/brDdv1hvVyBTcB7w16fo3cgFIBTDOUCLsztPZ1x+31Bfu64/r6Gettw75uWNfNKeitU8v7iHZVcM4eqXoNSg0aqGoyOa0Jamse5frgP+Ixj8k43F5EV+xi2c0uir3Z7KPNmXwE6/sp3ouUochth9QN2nbUundjznkCp2zFbk32PnEcFBtiPElzw2lN3QYbhszT6IUi5DxGpU/eCJ2n3I1drdXo7rZKADLYChjsT3WH3ADs7jAjY4olGFmcreD7jMCCM5v9FOLEIoSURsbFzJimCafzglP0z/jYBnU4TZo5CRaDCQFY9rSbsPD2fEO97dheX0cmtdUOLctWbSCbhC4durON7Cd5YBHfE1gAMgR0R2aV1BS1M3H/u5l9uCWZEK7tX3dyrXkQEO0Fw0EFcce+NtITmHsdzK1Az4yOMGAQdaK3rwcn2UgQaXfJK7ZxOjbCoiG3Zs5CgVQFtdpwyTRlNAJKtd5LJULWhMwHJ9UUtSm2XbDtDdtesW3RHN0iYsJeK6gmGxa5V2hi8NRMhd1bOiK4QjdndHdf4mHEdztHwgMH3SvkekN9vmD7X7+Cdr1ie/PG6c0J7eOPUB4eEPVky9jTARq/85C/6vG5dlKzR3zzMiNPGfPDjJTvnVSTZkMKawWv2Rar3yCJLrrurOxgd04kpqag5Hk9DUjiLqu6W8BjswU8l1LqskZ5ygeob0I5O6QyW9rPYYHaKHZbLxFZYsdkYpjxfuGgdofMPMrpTsqzm7bu0G2H3FbX2ttR19qdDwTgBtDzBRkJVTM071AA296wuxDo7flqGdXzFdu2Yd8qtr2ieTbUhTmTDQjMRFCWAaMAPUsx52Tq4jUcHLT3Fgk80mXtItQNZvOqKrYm7qh8vIRG/1uCsmLKZOmlNKhUSNtRq6mdm8KHQzaljX6y46Fmpgbse/ie/QJiECNH24FrFVqwVCy6zQlpLsM+KwA0aEtDYME9srlhzxgBNFizamSeUDH4VGPi6eF0MYzqUTKHPQJLOrJuI/8wpnnGfFpsL3n9Mxo8Q9OQnKod9JIWs4RuO+p1Q73tHeptPi9Km0C2UMqW7px6/BxfU/QsweFc7ZNqBxnKf5/GfKdC6D1W010WRQeYUL3VQO6kxo5HZwASwVissbcHmSac1IBg1eFmm2xtTiUeqxisW9lHsbNBxTl1p5ZEocQe+Nj3uDYgeZ2dyEkp6sxgO89aBbU2Cxo3c1D7btCqSuvnUPdq9extB28ZyoS0FQuqusqEiQd0gPLgtMKuHb8IP6ZeD0Zt0HWHXK5on7xGfX5G/frXocnhTE5AU1OsmQq0ZKhY60k3tu8+jk89PtdO6qP/xxfw9PCEabY0t5zmDpeJN5eiMKgkKNkYBpAVIBWmGdYbPgHfEARtQVslWHHF2HwIdlpg4FFn6vWmgA2AnksTDCt2rLmcrQ9pfjqZcvnDgnRyfDqPSD4aZPfV5ja1Jj7K25QBAnsPQsN22zyqcmkghcsOORlhdwmauqM9r2jXim1X7BXYKiCtAjuw/vJrpJeK/MkKTbaw626F2eojvmttuN5W7D5ZddvF60LSVbNJ1RQCNHWDoT4/pyn6364HBxdZgjBQPK1ldtiV2URGFVg9C1vFhthVVexugEmNvQUwagMIpiBgigJ2DrZPDI7JhAHdAQ4FYkBQOsavR+pimaIZxjIVlOQj6R3Si2yeszkqJIaQj3wRwea1NFP1dvq5NJeKsvsozabk7mJTc7cmw0nBzin3ep8vmh4Bk6mmeEvBPFmNNmcFsU1N5WLraJ4XnB4eMZ8ecP7oAyuqTzYhqVWDeaOxOtoYoi9uf1mxPt+MjPNy81lfNqZFHfruw0XDI7mX6kLDvl+S34vszriKojEOwCR6NkM+y6o7qWS9hea4vA34WM+CflZ76Jk7PFA4ZBL+lWIwSltriN60qp4Bhqo/OUmCGfNcMOUMnSeQCLg4srAzZKvYbivAhPW5dETl+nRGmY3xG891X20czfX1BevzBevLFevri+/x5qSLhOub2ebMPZ4wP558pM+TlRSeTjYnbsqYXIi627GoocF7sz4l0SF48FAbsK7AywX42leBTz4BfvmXDC1JGVUUtK6oZ0O3JBnsjTFR9jMfn2sndfrgjNPD2fFb6w8ImnNrAqoNeZ9QakXdLEoUT7eptQGVfEoELR03F39w6MX1TjOOYX6JuqirRaGHVNa/Z86TvWOee7e3kteQRWwYlb9/dYezXlbsW7V+CD8Hq2+4k6oGs223Hdu62+/VCM/hLCq1CE8F1CpktWmatRGqEJoyotwia4Vgg0gCuEFBPdOp1eRt9trGRzNDGiPBmSwSbOmoADI+YgxDE+mSUu+MiRAFg5FIIDrgDovsh4J3/K0iiuUHOjiiUGuMSkomdEM5BvoxcrJaS0rJx2jgMEVXe80muyMgDwMJ5qQSM3LOY8S9G6UYvx2GzJrI2bJHsay+K2OEunlXXncnpX5/3AlXN+LwGk5kZAx4f6DH/9GekBNSLsglYz7NyJkxzdaLRBlIsxnRaZ4xLSdM82J1ixJyOZ6BhRMNjcWmqFcblxLEiLpaAKNNTFPRHVNIVQUBRaF3ey0yI3BIE3llxDNWVoKQUbg19pJnriY3FgMkfaDk8bn3zD2cS+RF7x4DWb9vRSAMOLD7PEhvEQgR4A65K9CMwO5wMMBsNbPmtUioXV9HHNSZtwS0ZuSmtu1AE+wlYz+tCEO1b6b0f3u+Ynu5GlvycutMSfGat9SGNm2QanXofZmgopjOc6f/T3OBitoonS7o7BOzncpvQYT6faH+7LqL8aCQW7OPfbf7zObA9LZ5T2Z1R2oXf4RZP8vxuXZST1/8EB88vepF3ZBkAZyN
Vm1IndWjGqbr6pHgDa0xsHvqfsimoo8JzODWzOGJIMMfFo8IKfGoSXD2Bs6QhTkscLBlU1GP4pyAZN30TRRbbVa78YKwirGl6lpxeX3Betmw3TZo841dxrjxuM7tumO77ah77fRvKyBLh1hMz0ygq9GA10bYJWPXYvdJCPVawXVD2gAigxoscLJRCvtesbWGrdqo8b0J9haORnrxuzZxuZuh8iFNrMYiaq8nhseLZ0RdFQKWASUCRE0jEGLkgSqGy1cdTbUAvK5kRi8xdc3ETEBxaINSgqbo7SGkqEmVMorisDWRmLonMMjGhhnGJsuuxTeXgpITSsm9bknwqc3V4S5yqDKcbJeCaqYE7oX9cFLVhytGJrU1xa5qj1WNLJDIjD4zoSh1mR+Dc3xi8zxhWmY8PD1gmjNOZ4OV05yQzw5DloKUC1LKyNPkhf3kyvHWVwSngTcfdLm+vqJeV6xvrth8lljb2j2a5rWLdvgQuX9moX2YXC5s1M4MpRD4kiD0ms9RbiqGSGYKOTL0WVAgU7MwmI4cCruHdEeWBAzv5Cr9/jlIOuFgrT4p3UlFMGhIgtUPg2zRFEgsNrmYkyeU1opRDkoq1VGTWr1mzoz19cUa+qeMkFiqu9We6m1znckdum5ORDlkq55RrVMxevtUcHtzRTkvOH90w3ZdfYLyGal45u918zLbukABKIbI3iU+7r7jGZDpHUJNL5MEEDTgeoPON7TLDe22oS070iIIktGx/PtrHZ9rJzW/OmN6PI3Ob7+hqgAqATuhnKZuELbrzeCeNwxsACBQrZ0JZ8VBgsKKp+ZEGiBWtGfAlIQnj54n78voxsH6tKzobBmIUZetGCpeTObWsJOC1w3plsEv1sTXx1OrYrtsqJsRFbbrhv1W0fYggkSfkb1Hq+JRlmVRMZgtmE4BXdnyUuhuUdZeFVUZwpPL6QAkBFQFoYFcZ8bsjXQWnhVwxaE2N8ie1XTbDu31GxFjTYGOTZNDV04+5dlGrbDDNh7VE8zRRhE+ULigtmdYllOI7DMTMisyGIwCyGyvT2S4OSdQKujSM35OSRWkBjd21Qk3AlHPYWKUks2hEe4ai0WGRJH97VCvECch9HEG0gxudYhsl5iaO5yxwA2tjjoJwQVRvT8nl4wyz8Z0PZ8wL1ZvevzgAdNcDOZZMvKcTb3Es624dutRMrWWLijsPWShMlLXvc8Nu725YLuszuw8Bhj+CMWuUfaKfduNpuwK8zGW5fi47YuQKI7e3FFbO7Z0MB0N5eFn7gQ6NZzZ1i/5jCoa2cAgldh3yElPQn25+ToGQlJNtR3SqnBQkUUdWJbxkJRG1i/jgo/jfLp81747IiFoN2/X8JqU+jMQEchmKvJSK3SvB1jTr6w10M6A1644Z8hekZcJ2+WG9fmGcp5xevXgayabcyoJy4MJCpTFWIbJpzCE/+twbeIhU3V+ALYd6fRgTfxNUHeB3Dbom2foMqMCUJ+qPZ1ms7PyqTv/neNz7aTyNCHPkxGkrAJreKoGCk3Ic4bUjLwb3lu3vfcIKA4jEI7hlMCiMDkMSfT3DG2sNJlKhGnR0V2vlroCu1Y4Lt9sgYqTAxqBYWPvabWaGRxrj2h6u21o647bmyv26+5D/YyKq2xZmMIyHGlqMF+zlDopushtplDdGCUBrRXSqrGBlSCUDtJQhGjSdI0MiyajcS8aW/vm1G40j0e8XIf5RADG/d/FfR14SzdQd69lL4ROqcYQBD2yhRjunAiuRG/ZbkoWgSfgbrqpYXjecxLv7z05LDLgwoAnogWAjqM3PDgCejN323evK7WuTqF0iOr7PWk2GM6dVGT1BoWiD38MIa1wdoA3Qce9ceg5lYxpseGOy+PZCBHnBadXw0ml7qRmV43IPZ6I14q9ZPvJ5XiatRiYisSK7bpiu6xYr2sXjo17yMdn79li80wx4KahgIEeGNwrFti9t8fEHcocZBCHN7tzwt2/xwsZ8UnABsV5v060DPAB3Is37g4q8Ed4S7XKMKx93dt1CHxtgzp7NJT+IxuTfo16f87q67KJERJaM1akZ1Xw1zPKu/cnNR+EeZT+ikMNphNRo+8nsz9tq6jV7EW5zkZnnwqmuZj8WslQEUxBQmKG5uS1T+7CAV3iLCfwNIGXBel0Ai8nEDYAzZjG2472coGeFggz0vmEompC2in59O5f+/hcOynKbD0HfvPAA0dOXMDZl4sbku3lilZ3ICmEdoisUNltFUXOQeneSHpEr14AoMLWgX+asDyeTOEiH3p51CCuWgX7xTZvbORWa7ygN+Waw0FAEYAvRDGWVBPUq9Wj2t5Qq0Vkuyoq0KPtiEqDsl3Y5wMlxuwF/mN63iP9BqgyQNnIBnS/YUM9w27BeJ/u0xERacBc9E6/iiqwN+nZlnrEGRljOkTOsXELoVOKxxwf+08QLSsDWeAaZwPqyTQyqSlFzYK7NM0gGYzNj3BCanUN26BxdejevStGHJ4XoEbQURtxL62heqbaYSF4RsIMInaZIkGT6hJPrRfhm8ggg0hMkeXO3PLl2JWziU25ZH444fHxAR984QPMpwXnj54wLdbzND8sRiyaCnhyKZ7T1EWU1V80lDwGdxGW/e8N23XF9fUL1ucVb37lNbbLDbfXL9hcb7EPuYQ7bsR+0J4h3mksuvNFZIpR5/F6VqwrInIa9sg87hhnh9+78zUeEAgzqghWAJvhCGDYuihMBmkBRlWPm+tX3+E9h7JizQZkGeW35l83WMtAkC4Qz7k11MZo7qxbYnMyUb/0miTVCqoN8Oyor80ITGL/REblCvOxG2Of2GfpJ6lk2Wy9MPB8we2TizGMzydTvJiKT652jb+HBcurM84fPNg4oMcTcrEeqxS2NiXQsiCJIn/7F4B5BjdA3zxjf7ng+bqivq7YbzeUl2dMT4/4qO5YXj0C8hHKXNBuV3yW43+7k/rRH/1R/MRP/AR+7ud+DqfTCX/oD/0h/L2/9/fwO37H7+i/8/3f//346Z/+6bu/+8t/+S/jH/2jf/TrfLdD1B2LFCOKB4YCRSqmk8fZIBxjgFVPd4yQbFGkOjbm0QLfj1gI9qCpRRfvh/GFrp45VQDwzKPaKIjqBVF1thGRHgzPMAu9CFvDWY0RGHszp7S6ETM6tkd0Xo8hsqhdWcDqU2CVu+Cuv02PVM2Y+GbQwz5349qjyQ6tkY0rASybY1hTJpG9HxMyG9RIRF2GR4TtnsCaccOhxlgFpnFOiaJRNuBKp5bDvl8YYCWvf/mTdqZbNPJmGkX1e908N54eQ3sa1p2gqj2bIx03sqfDwrLb6Bmn9gyqeuf9PqJd/3tSNWiRxeEgcSktc1RN7p2UkSbM8ImywX16UKkgo79HH868zDg9nvHw4RPmh8XYXLM1heb5qPno600OtGmMSD3gpsh29m1H3Sq25xuub65Yn2+4Pl+wXVes15tl+LV1iroF/9wz+VhPvecpHJSaRp4qTKld1OuPdn3Zny1TRO6jHw2HZ/N2Bg+FZ4D+TzLUoQLY/b1TQOv+e9RhwAjCpJN0wlF1goRnUFZj9NlXsOn
BUf2NIE488onXikxb/f6OvXWI/PQAcbtLDYcV2Zhl2tIz7Yiw2PdR9otPBwQomJYS9astodXWhyDu64Y8ZbTWsN027LuxOqfTDAWs12vOxmAlq28rM1AK6PxgJOhXVw9qKtbnC7a24nZ7QdGGsq2YzieoNMxzAfYZdf0mOamf/umfxg/8wA/ge77ne1Brxd/4G38Df+yP/TH8l//yX/Dw8NB/7y/+xb+Iv/t3/27/9/l8/vW/mafRfDQpAW0ZDxqpZJdHysiFkTKBkoKpgXQH6daxZ9OWc/YdW0rKPja9M2ASW+F5KSgPpoGVSkbXsVprpwrbA9ux31YTdL2th4ZChUJ6pNYlVTpzaGTx1o+oWJs1sF7FPleHGQIsjohSqEGYQc2LoilgEHQ4AzB4sRtkOtxDd1AaeLzf7HAmFoVyJ2VoClYdOtSW3KBXcUI3GYwWhowAhyVHhoDjaxCQeRTCj3UKZjcER9aXO9vEgzgRFG1P0xzeg/cQ9ZRqfO6/OjKJfhz8Ux/0ptpHgrRW+8DEVqtnq62vSbCAknRHZyoSFVX27qSiflk1WH0Gx1btYk/99SJjSZkxLTNOT2c8fvyEV9/xEU5PJ5w/euqtD5EdRe1Dm0XrZigjgwfqbmzD3Z2tNEMB9pvVRq9fe8b6csPz115jWzesLzd3zGP9cmSuRChet+O3nLuoK3goOjIQZJJgVIIJJSAmd3p3os2H59M/e5Yde0JhzqOCsAPY3DkWWOBZInBzp6BNfFeGv9CuDtIhSnj2pHBgy6YENxoQfPiYFpmPxBBOGcGLqDMAD3CdO6hw1N1h+l4c9009YDWEIrK/TBb0QTzOFhchcCRIVE2HctssO7u4xmF2uaSScX25WCb15gH7bcf0sKA2wXSaMJ9mzIvJKmV/tigT6OnJiDZ7MzmsfcelbbheLri8vCC/PCOfTig5QW5XTEyQhxPWbcVnOf63O6mf/MmfvPv3P/kn/wTf8R3fgZ/5mZ/B933f9/Xvn89nfOlLX/r/6710rKaBB/l/FZFVeJSdGLkY/no6T9DbBLlN2GKCK6xAaDNYFmN95alPBU2zaZiF+CsX7ro4CncuTR36cBp4a96tb5Eii44oSr2qoAb3QNVh5YioyCc6kKMBir1aj83aDO6zWTtmwgnGJiJVbBa6IsM3SGwegqsq+IYIPJ8jWsMd3AGMe8puwSe2DAYwKZrmGyCcVFCBe10i7k1E/h4ZJ4zkJDKEgHlis7GKZ1wj44sIvR/uhNgdbUCafeAkUWeEjd9/W5cNb71efP/grKKeoNqNjYigetYUg/1EDkK5GtJGEQC4ViDBAhTPnjr0pwc2XM86fBHDpsoSMRIYiRilFJSp4PR0wumDBzx8/AoPX3iF5fGE0ysL+lQV2+WKum1YX1684C5I0wTOGeV89jXExg6tDdvVSDutVh+vseH2cvWRLAb71b1aD19rA6rz25dDHgjZ2mK6sG40ZFtms7rBXWvrmUZmG+Mez5scmgtdvWPWfwwjtH84LkEW1lBikFjtjf0Xbd6SN9irgkXuYN9Q12/+jEf7hDmnHTY5eIUzdF1lwWpnNiU6mtSb91BVRdeX/LT1FD1+gVgcA6QgF1Wx11rF9n91eDHugKi3I/iFsm0ud3gH2nxziJWq1UiZwLcMygn7umG73lCvGyCK+XICBCinCddlwnyakHLCvBQLRtnWp1BGLQV7ZmwE3PYV19sLXt58Dfn2gvwy481UoJcLSqvYTgtu+/7u/vuU4/94TeqTTz4BAHz88cd33/9n/+yf4Z/+03+KL33pS/iTf/JP4m/9rb/1DbOpdV2xrsPrvn79GsC48drhqGF0+oP2KDoUefOUMS8T2jKjnWbAe1aUvAGzFEynCZwLUp4t4iYThE1T8j4C7s5JHHMPGf2ghIv3J4U2WUBxsfpiUaoqqI0Fi0AecPAXYhunyWB8NYTm2P2ittcFGg8pnUH8eNe4H1JQBNzRw7i37qfRTS0jE7YN0Q4zZQJuiD6eO8Oho+vE+lxwyIDDzdnP7WfaMyhg3JOjsyF3eGG8yH9+9xkHp0MYDoPo8Czub00cYQeDftyJNIcep5FJHSJkOcBDcf0Oh/UmmcOb2++Jw0xtsALDScU5q3YHbFqlCXmycS7zyfqcpgcTE83LdJiIu6OuK9Y3b0yqqFZwmcClmNIHZ4AStuuOujXvzdvMEV1uqNturDAfaLnfdmshqCZpFUhVZNukthCa2IiMUL+IWo2FZ4QGa2jeddRbYq01MkWQIfTqP+lBxDHL92d7yMr7ugCDk4Il9ZHy6QjjiwzHN+50fz62LUctNmpQFUAlo7mrE1CY2QgyIiBqvl6oN20P+v0xgtPj272zDAcKqA4van//RvbazpPof2sEDhz2fb8QIJilOlTflQhoAq7cTUImRr2sYCLsy2y11lohe0XKCbJPKCWh5AQO2TCYM95VsLUde92wr1dIq5C6Y339CbIqriWjLQvW+i0wqkNE8Ff+yl/BH/7Dfxi/63f9rv79P/tn/yx+y2/5Lfiu7/ou/OzP/iz+2l/7a/iv//W/4id+4ic+9XV+9Ed/FD/yIz/yzve7ZhXYpIKAXjsKw2JCkQpKjPlhBrPioy9+O06nGQ9PJ1xfP9vwMFGkMiNNC+aHR+8dmRzV0R6VdRVhBfbrhi6PsnoGtVagNdO2qtY7knIekAI7y6mSs/6M8mrnG+XW+w9gwAexSGMoox6VhA/rPTZUsOlis/VoEWZSop+rYWyIsWHUkKoDRFeSRbFZ4+/DkB42mG8o02Abpxd1pq6WAHN6ALlhH5Ff96EaNZjIqHA3vmNkg3ce1wOUEcHH12/DRXq4N/1+E3CcotwDCHGqsKjRf0XMCXQiSrsPnA7Pr//jmObDMmHqz11glZoYH+HBAQYxJyXraSkl4/y44PRwxtPHr/DwhSc8fPwKy6sHTKcZPGfI1eDm69e+jtsnr/H6f/xP7JcL6rqCy4w0zShPH4LKDHDBtkp3UtVbGtq2WW/OtttHbdi22nvbukE8LDzVBlFGogbKXqM6rOdh5BmVAIn6EKmplBBBU7JaEgHZ10iCsed6kBE3NRaMYcFGoc9G4S5EPrcto+zGoDTNP1caifDIH0fQz+8/wsEqdlgGtROh5gxKNv22lGKKCmSjRLBuBs/VavUjitlV3M/VLiHgvS5odbeJFY6iKLB5BrWDUIkhBLTDAM2A9ZrfkqiJdQuh4Xgta901MirfV6pIrUKblS+oNdBeITfrq9yfGVfvEyyzswKngpII0iq25zd4/uQNXl6/4PpyxfV6xbrekNcVwowLGPL6DfT1G6RScGsjD/zVjv+jTuoHfuAH8J//83/Gv/t3/+7u+3/pL/2l/vXv/t2/G9/5nd+JP/JH/gh+/ud/Hr/9t//2d17nh3/4h/GVr3yl//v169f48pe/7E2EFSrcM6U+yoK8QHvoyaBsC2r+4BGcyWe5LJBaUZsanbJMmE8PNpsmZUhT//CIMI3ehnpxcVTXK5PagN0VHhz2EY80AfK+HIAgIG/mBWBBoUZxFJ5h2ebvC3gkAy
AVcjVaAQsmSNbY8EXfuNPUgyZfTfTCf1J/7En3jESO3xhS984WNqE/9/HsfbF6RFC5Ai2qA69KGMI+scZv1h/4fXD2DkAjD1XiePtoct0oiA4arIOmumYhYbVeeWyOe2qIhsJKMmAybXpBsgBO8r0JHucV2wPF2wvbLnB0+Ia8bytCJtC9KaEbcFMWvRmbx7O1rjMMcO8dUjgUtFiWfHl4M5pm1b8cGrJ4VyRLTxtVbEoMd6TQlbTrjkjNdPK9Z1was3F1w/eML1g1d4/bk3WC4rtg+uSqKwDErhx9DPM2/QKcgpYrmsaPuB4/UV5eUF+y8/IV5XpOcXlLwgnxXLqQQQooht27AsmsUta1bFZS7aBFpOUCkAN2WZ2e0JaQHFDFqfIDrUROsP0MhdAqmRCYSUAihmXGMA1az386xqfKoAYqy5oDBbCtEc1TSyoi+pqRZlmmgCJ9AERI5oETbtNGBBMvUSjVvdjiwxaDYFmPK38gf1uO1eQw1eJBO+1WU3mFjAQ43CEZ3uiGisf0zfu8EbdHRo1N5MNaIqWsFHAROhkBiUXHH/8EOcLy94/p+/jHJ7Rnl+a7O+CDFfIHEB4tIbZY/W8HIU3EyBJETg6Vjwas14WiKuXEG1QM6K1AQbEVJWIgzlDSGuoKhZk174gIaI2oDbfiIURjhqz2rKcVomdWDf72i1oJRDWamxIdAdXIAlX9SxVkGNUS9Bs6wCQMimMJJCRx9OI0Wc1cgRFo24sHXOOpZnW7NB6AnhojB+yhmUExAI29OG1hjXN0847yeO24GX129x3g68/NKC8+WuY3Vud7RSUZqOM2nW1qEfyz24zjEY61BrY8GEBMhLE8biZVuraibVfkUZhq9BZc8UNA+AMAoTBvA4TGv/SpoQPPzG7OsozBM+wT182cenWruPa0FD6IW9VmuH7QYLQnqlafiqwXTxjElEITNyw4eOxDz6OHgG86iGPWQUMcFFA/5za/FgMFyyxEcZbAvStiBf9JnWjLwtiGsyunno47z9AAmWRpPm08Kkisik+HfIEaHq38YUkZOOn2cWnOagaq02yA3YQsASI9YUkFNANpp/3hYsl2VkT2vuQ8+c7OHHo/+JhlVnAKLZgLcJsA6RRIo4akM6KuJRzEkFrMuKvGSsy4JlTbpnWkY9D1QyvJ8CxJhEIAItqldIKUM4QpjmunK/fyEFUE6Iy4KaIqQ27FW741X0UzdUEukjVR7GfTs85jfS4VvC0PQzV0UQa1cQLcxL7CVIZ5M6xSL1PSzTMY8WAsBbIUYWrvbFT3KscycX9BVJ4/XzoetLH3Mw+3M7hkE6crkxLhV1196mep44Xu5qRO97b6pXXcrgW82EYRWaZajupbYmVFDVs4lQFlgiVduHwIShEygCCAmUFyBmgBJ4qm0xa731PCpCZFXvNyelgz+LDt6rAmlQS8oG2TUNTFppCKGiIvR2her9QRAfbAWJGqhqwqa9h7OTIlG4LcWAddFhq7ItyDlD1kXvHovCzAkIManDNCjZJzZwa4g5mr6dwm/nqUFnBXo2U6f+JjMpEBakiTcEGtCyTLU0t1ZEAAm5apEtLFJSigiYxCSigpEh3DaSIU+TPaXJ/gFTRVg6rKzrcHZkX/nxqXZS9Szq2XkU+4hUSFbYZE7a0NySflsAv6Nzj5I340IAB1CE9G+DReIDFgRC7O82cGIMMUYt9Ttjzn84aKAhahaVtxXr04bLmydsby64vLlie3NVaG/LvVE2pqQFaxqWxvXjPNpG1HpLSAEUtBYAAuLLgsyCtTZ8IE+4rguettz12qQVg92qdcYDayIsS8D6tOLyesP1gysur69WM1utHuDH82jqvMsc5oRlY6Q1o76+Ir+6IF4vuL7ckT/4APWoOI/SjU6KUY160jlgADQTe9GA5DgsY6Zo8koZtG5ASJCQrT+NrXHa7kYkUI5YrhvitiI9XbCwRsNhy9jvJ/ISrQZQsTRGBjpkqz3i2ijdG7zdQQbq2miw9RNMIikTgTiAiZBS64MpH55WsAYG1u+inLOTcrCObL1BfGifTJ5Fpv+hr/1ujBzwmzOvKVMjPN7HXmtxbczaUO6qnFL2HbcPP8J5e8H+9gUoB1CakocSIeYMWlZgWdEQECqDj4IKwV4rXo4DEOC0OldbMlLWcTYLZaRlZFEICUgrGAmMgFoGLCmlmQAy94yWrfg3C0QTRwQE5DBEUCMiiAltV5mmejTUpvel2qwqZu4jQip8EKLgVipaU/o2W31Iy1mEHCO2JWJJEa8vikg8XVZc31yxXlZcyxPSlpEvK8KiOp7JJnBvrzYs24JzP5CXjJdfWfCyJOy1QEhw7sAh+rmlcV9HHlhvSbCQQcQxIsWoJCT3YvRx9+CtC75+ndXHg3UFoYYCUdYkbEKyTNA42Yh6ImTStokEsn4yQgSbPSRDq36T2X3/Nx6lFCQhEy7Vyx7jFCrKiC9n+LEbF6sfdFwVTigeOmh+GYNBdBoRayrsNGX9LNIaizk6r0V0h0XTN0bTDCkhLRnrdcX6dMH2WqG+5boq2ynHrgnWTwjU31gA24T2u248NZqLNoZeBMjXRQNI1kXVloQlkdKGD+C8n2itopY7AO3ZkYua6ZhMbX1NNoRQR0mgs9neO8kZUXLDTYQoy8N1z9cNYVlQz4K6l66jCHZavhne1jplnW3WUDMR4Zi1IByTB2fSB+h5wTgaKSVfVmyvrsjXDcsHTxqRMhDWjP1+YN0Szuc7+CgIx4nYGMnaGBxWc/i0a/KRkjNE0Af4BXMwQQBEtubXgNoqWrMptFbfLK2hijL9tL7AyIhgaDTsgW3orL3JGU0G+JOD0h4r98kAvm5chxCTpptT7HWcDfW/g/X81KMoMYl19tlxvyszbT9Uh60xSAgp6XTcfNkQtgvCdlElglKR9xPxsFEkIAwVFvO3iJBgQY4CTjZ3R9X6mcmUO1Shv7FH66MuY+i8BZsOOgSEmPt1CVH1I3NctZ4pqnhSW+lzusrspMxRnczYTZ3kVqtmhW1krsEMdgoBp6ERvJ/Yloy6rWil4tgWtFKRLyuW64l8XRFzRF4X7Y0MAetl0YyqNmPFMfLLi/YxvWjzcSPu+qEeGqtioa7DJSXABADytmrwGmPPrsSCFpBDwfawAmqzse/hLKBSAevpitaQ6OQxt6nJevi0sRymxiE9o4qWUQW3D7/ZFPT/G49SCxImBfIQRse9A3CTo/IF+xihwibDTj+nmdigr/Ex8CkEpMhaJKQB/XWg+iGanW0Hja9ENmI7aiZ1Gb1Qy0UH+8Ws0d542LtZxOZkDjfWADoM6JBgjBFpUSeVtkXfoTEiAVwS1mBzpaTitgtOqahlt3MKEL4C0sxJhQHv+cBIGgX7scZpfDFDSCZVRTmNrIMI7bIipgw2J1X307TjivXfaLFehCGtQkoFu7J6rWZwg0JBzT7Z+lHY+0tIa3/ZNur6atPa35trby6mHLHeF6QUcMSAdj/AAUBpCKdR1KDG1GdGOXOULHIWW3dkxXG9W6RSP5bNhwpwDKYXaUMzWUE/j0pVeEAgQtq
DZ7mN+OL09SVTBuVrY/r9vOL6wp4Wvq/LucmTzDk99oPpe3JjCApQCbUUnfd133Huhwr9FpMmAwFRG8DjtiJdN8TLpknfGZGXHSlFlewinRs1LW1rnA5ABIi0kK9sOjVzLMo2a+KOXu9HEIPy3vPWoTtdnzvlmW80EpFSzjW+NEFfq5fVMpxUZYX29tpwt3aCl6pDRk+nhxvU57aiRiO4HAVlyeCjAI2xbhlgwbLrdICtNaRFM524JB3QuqiKO8sFtSksn68bYmmQFCGxoYWARqyqNiDo2BOgGmzIKYLWBWFdkZ8uvUbcmYv9eqgT9zXite12VrTWEPYddFbgLGhECLFBR/74JHD90xis1SYMwliyY3Io27N4/fr/gOLEb/TjdtzBtdrCCNi2zQrjrlkXLA2nXqR2/FtFHak3+87RaIdUAMNpSefY2GTdBjIdPy8+/n/kvU2obdt2Fvq1/jPGmHOutfc5JzGJAYUUUrCkoBBEC4oBjRX/KgELomBAiCAWBEERgyCoBY0FBSsqaFXBSkC0YCUEDVjSipBi/u49d++91pxzjP7T2iu01voYc52T5OY9fe8d7oR11j57rzV/xui9t9a+9rXvg6pXk7GzwOOAhv3+Hu1MSmjKyMuC+aIQ2mLqDQrvqZq7bGLNZe1FpTkbTGGfi2UEKRHXLgzDlhtWTQUiyPsz2pwx5Yh+U32zjorSN6y3BpQb+v2K7fUDmHTGaHo+g2UGRUFINALUA3Hg6x6HAGUlBjyVJ6Ny50BILeumum8gZrR7R68b7p9e1Tl13YBSFGpYbf7ttoK2Tf2rQAbxdWBiWDNkBCaKSYkpc8bydMb8fMblsydMTycs759ApoY+X2aUreL+fML6fEK9b9i+84q+FvSrDqeKSQWxwWvUDLK1BEn7ctF8siY4/h9MSZxCQKgaoMg2uFvGNFZPMmeKJh8ctqHXID4U7kkKg2QXSt4DFDC40b8V3k9fPbDdsC7EXSgU0JGSVpvOHkF1F3tVlRf3cusEEJkH1rKATidM758xXc7I5zNi7QhrxfmqqvN164AlEykEnKcJy6T2MhTMZUA8o7fxMYO4WtdhaK+ckihwlGB7+lBBeoBygeZgn1dt7ZUZSKTwYO/qxN1KQzd/LZ1vMjV3qyi79aBa7WjC1hfCuA8BNKxeeiCEWtFyhKwZfSvIOWG9bchzVhLS8wl5nnD5/AnTZbFEakGKAaenM9wH6+X1hk6E+eWGZhUlKOzu4HbM+CjL9HTG+bNnXC4nvPvsGXlSN4SQwkioKNIQEfBTbzg1m8fadrujbAVlLbi93FBKxe1VZ7uaCSdAlD3owSkfvqtxJRSitvUqAMD/H/lJ/b/5qLUgipqhUYwKiQSMOQdAF9+O2e4LOAQYy4WAQ5A6YvI7RGcLD0AKAVlU5qUb469Dh+Y0c99xf4cM9yBlC+JobjdlpCmpzFE0OZ3OulE6o9cOiqbC3vvA0j2w9iajqop2wADYDxv7sClFUGYgR+1VESDS1Z6bm82ameZaCOMw9KzHjeBGU97LVGgF8QBrDpvQHWNyeEmAocYMm3MS7koNvt+x3W64fvqEtm0avGoD9a4eP6WCWkXw7M2glUC7qRt8hoY0qaBISDmNa5xy0kzeAq4QkCe1SNeeXEeOAVRU/Lcaw61DDhpm/tnFZn/Dfp0sEOs8D3aR0bBXnz4r5QOSGpwsEw7uJG0HKhRGDg5dyZGFt5Me9tpJ//QIbx9/Rh/B1qUfUK6vdrR08OdxSM5JSK6kANkr1ZASiJTZFqYJYdFKKs4T0jyhoSE1IKeMKWfMOaPlCRxZdQhTRDJauUCJCcwygngXIwp0G//w2GyJZrAOx95bObAuD192QJg6vVGx5aD2YRJG4ybZ9Xa0hQ6vMfaFozX+ZzIIPvggLYMboVFDLUocCiHsTtAiaEtDsAo8eEIaVKYpTdkQlhnTUhS6npoK79LueeWw3WSzjfP5hOX5jNPTCZfPLvp782TJrAWpEN4knjSSHw1SHWnJmCxIhZRQtoIQA8paUNaKVooyI1mMCXisoDxxcBIK67kDQOT/BwKz/6cfW7khcAbljEgZgRbECD3ESRdt14RtDNwB6hcTg0J3Pi39lQc9fo8UQLQPR0abY2miMjfqC6WbXCesD30LYD8Qsg4RpmXGdF4wX5bBlqOgQpq1NKyvd7TaUNZihwghnyZQ1Nkh5yD6DJ/PYYQQkE9NmXxulR5IKyjuUDm7jsYVpdzB2w19vaMXVe4QO4gV2NfMh+y705FHlQBd4PKbXcO3Cf2hTBUR9FJRbjdcv/MR3/nVX8P90ws+fuvb2ndaCyZmJAAnCnYg+SxagKSMkCeEPFmGmIE4jQMHrBlomjPmZca8TMiTut4Gox8RESZvXIeAyewpMhHKnHE3Om41JQSVK2ojqWEx76LoARtGKNGgxNGCCsw0k2XIUm1NG9+deRz2KUUsU0aOWrWTCNBxkN8yVQzuAwXwa/9VyoRf5zfL2nppR4O6kKKtlT2YgrTXxjaAORyqfRYoBPUri6SEnqDsyfz8jOlywvzugrwsyMuCJhW9E6Y84TzP4FNHhiZeAUCOEdlee3hoNSWalGYwnGg15Z+HvGKxBNJSPBsu3XuGZKMNQ8cINjxMYSjLs6ESGoBV5SQcrqYnqZkiGgGdjBRDAepTS3tfyLaPjyiT9TK5A3Ur4BrQm3qbbbeE9b4hTRm1Npy3otXlnEEgTJeMaZ4gIFzeXdA74/R0NucGAEZisEY0EEirs/OC91884/Pf8Rme3l3w2fe910pqNiJW2KsoWEV92Jh6Papej7pu6l69FtxerihrwfU7r1hfb9huK8rLHVxNpYQV9o1gU+PptmaALs0SX1W/798LlZT0DfDGHBFiVMovBTFVCLU7cDkgzy5zUPsFDqKYMWFo1u3/hUJHwKgGXEkC8GE1LfUjKyynYtmCKDbGSAeBWcuu45QRslK685wVe7bJbJeT6a3j9umKViq2+zZ6Hwr36ZCrkxbIsP1oUA3FgF4rYk7opemijITQm4qAXm+onz6hvt6wffiAclvR7nedEKeAOc8K2cSkauQsysi6ryivN+25TBnxNENyQkAadGOF94CDhzqsUB0X1Xt/wkppruuG9fWG1+98xMdvfYnrpxe8fPtLSG1ArXZWBh3UNSmbmDMkJmA6geYTwnxCmmeElEFp2ocdbTg35jQqzF47ECtwDzYE60WRHr7EjECw4WQdsA5zQhRGqEnvsbD5dvnpb5BQ75Ae90AfzLjQvlwVfjXq/1o7KqtOXzL4ac4JS9YDO1qgFbLBZYgZd8peyYyiSsYl9y/YtdY4K6PiHYEKNCSf3G/oKHQscKUOrVa8JCPbMxQIMUdAGBmzro0ccf7sCdN5wXS5IOas9yUwKJjkVs5YpgnUugr7mroBiVGqWaG02tRCZWt9HyCF9/90Fkj3o877RONQM5GRTq1yevO5DXvS2SI4eYNHcD9Wp15RBhIgKLSYA0EQNeEwkQDvJ/pLRAKmqD2aKQadQ/Rqn0xLszNEqvWWd0POlBPO94sqVZyX0aOazz
OWrWA5z+qL1hjU9TMEE6RGipifzlguCz77/vd498U7XJ51CD/mqLJP8XCfx3U6BCnoevGqMi9Z9VBrw3yeUbeK+TRjfTlhfbnjPr2qbuptM0V7BrF6sxETWmPTsuhgaaoc870SpMAVSgmOCMTK4bdrzQYRVN4xbM0IRftLUSEaHbbVbIzo+OQ0NqUTKQCtwAD7HdaDhJjROkz9XINUFBmySppp77YWcco6nGtZPekbRt/qcJTdXu+q63Xb7NAnxEkn7ZFNLsiCntK1E0IkC1IRMSszB8xqG88VvG7g2x399Yr2ekV5fUVdC1opWqpTQE6TucMGRJAGqVLQ7hvqvCLkpM8bFZpBUFr2ftkOczfH6znOc3F8BcyMWivKumG93nB9ecX10wtuL6+g3hF6wxy1zwFhUMqqrpwSkDOwLKBpAc2zDkcaq8yZVi5gSf5eRYkmUvQgG5xQ8bOLdnYhwbSsgiYFSZMDYmObsWIs5OWiBUUlfPAYIueu+H43kkRjDVS1qepCt/eajCo854wpBbWEBwDZRxrGtXOo700/dZfJ8fjpzFYcAtQAqrAz+w5qGocqyqs/bfXZHgi++q2SJmOLRbGgHrE8P6kQ8rLoeqUIhKb7NEakmDAlddllUnKMv2kP6i6A7IF9Ryw0uYhjr6uwqSdD8nDY6p8H/cQDFDkivUPX+7zZ4fJYMCFgjKrEAGQJEGFMUdEVR1TYro8rlWQzzswhGOlqHwPQxIjBndBKBTOj3BXaK/OkUNucAVH6dkoR05wxLaqp2eYNXBJi0yo8JVVcoRyxPJ+xPJ3w9O6Cy7szzk8nzJdZoUMbIB7MVHiQ2jfs2KoGVacpDYmsPGW0UhFTwJQ14QgsqGtBBUGqStRRI6jiPA9ZMkGHiAYqANCJr9/68Y0OUjlHzFPEPGdVJsjJJsJ1QZcmWGtDaZqNeV8qE6GlgDlGRGL1gglxf+JjZmFVDNxOI6pyAIiQHb6x4dRu1YFPdkfb9MmqH6+g0pRxflKh1hQD2ILT2jrqWlDWDfeXG1ptqNu27x2jfSOGYb0+nSbEGJHnPN6rkJIc8pJRT5OKknLRKurDd3D9tV9HuV5x+/ARrTF6F6SQNMuN2YiKhNgYuBesX36ElIp21wZqviw4j6zasrgxOYg9ax3X074fYCeHjXpjlNpx3yrWrahDcG2Iolj7hEMTNgakHBFPCzDPwOkC5BlIszIHDbbye9fF+yhQf6mtgT9eTTmd0Drbz3glrhWCiGBbN5StYr3p9946OgUgJmAiBGraWzBIEYDStAFIILgFvCsDcFfoqraOwh2VbebESD/TlDGliNOUkGFzWdzUjdgcidVU0KBGnxGSg6GmOCHoEfDzyioe/vz2IbAD3qqprzPK84HtozNvtiHzvGQb+lYbF6X8L/qeOkChauITNTGLKSEGZbsxQ6/RgVHnRo+Nxe6TWqsIienk6ncKKn1FBHCwXhu00trHBMhiMh2+8AA9E6xPwwxChBCD0bVS5X2sOmnEMqFbg/sRlO0bwvBWyzkiJ533y9YbDsCoTrj2XdHD5tC2+zYEprfrHTEF9HpByMrKW04LWu04PZ1UXb420Na0p5cjkvUBn794xundGe9/x3tcPrtgPqmDgva2DyLII3jvm9TbyY8LI43+9HSawK1jPs/Yns/YXldMU0a5bVjnF/R7AW/FrFsKmhT0xiBpgFQQdRDxQCe/m8c3OkjNywnTPCPPC/I0I+QMoqiK5p659m62CF5JQTcLq7+JRngnVByySMvSXFzNm4whm65cCMYSBGJntBbNMK6PuZpoWVNM1gfJSZUkss5HpaSBz/snrTTUraBuVQ3nWlN4ynEbDjv23AWSGD0QkHiHY0ibqMGm1olZBTy5gO931PsdbV3RTb1ajDofTO1BPGMWAVoHB4X6dC6KEM8nbfD2jtEQGzn6m/Bkh8lQ3wYeD72Bo/s13SvEwND+y0MWfKDOWobsUJ2n/LsslgdMG1GweSdGgRDpjFTrw5GULEiJZdfrVlBKxbrq/VD7BEYwEkNyaIy8J6TOpGACta54cDBVBN5DA1mGHkJQzJ7cEM71AWkQI1zuSKG9XaF9WCiM4HT8bueKHKArW85+dwYkOCqv/bUM4xq/9HCvDgK6sMZ7MF+zfJqR5oTpZPNRWa1myBrqckAm9lvmYqU2i9RdiXuHM4/kkOPjobo8fEgaFxnj/XtAHWxGI1YNlRRb88ICNiNHtzdRyFhfMYBM005XuqMqIGglEwPSaUbKCdOckXPaRYzt2veqCEdbi3lJmWCvQY6tNtRStQdUqvbsUhjVlD63niExJUhxDwMlTuUUMM9ZvxZtJySz9RmMzeAhlw4o0dvodNzDMCsbXUHB+rQ+z1jvJ4QQIKWOvmErRZM16Nol6YiWYESDlvl7IUhd3n2Op2XGvOi8TZzPOuzXgW62025fvpmnDQHgqFBBDDQ2OPCQWI0/jdmXnBFyRJwzkmVLji03E4rsXaWZvErwBq4HqTRl5EUrvmnJY27DhWG32zYqqer0XqN3CqDZtOPsLAojic7ecGqK+1pGTVGp6Lxm5BTQUSH3G9rLFfV2R/Ugpct7NOatlNAsvWzovWEz5YreGtLlhJDTOCh/k3X91f/zExKAC8mGrHTZtJwwLWcd6s03xNYQSeWKHOIR/+x2sPbWtDUtBELUrNco/iAM9XFneAoEcjeRTGaU2g627HqQNVYI7roVJTeUimaDwYnUPXeOEacYkYAxQAx7LYUzMBIZkPWF4OghIZkO3OgNmDJ9DMEktGT4I0lnq6ScdcajhzIsFWRXrgD2AOWX3KsLD05kP+OQKLP2uoJBhAQ/1MM41AdN2Z7MA1QwXcnp+Yy8JEznaYxK2LEM6W1cBxlr1Oje5jzMRlrgxmO8AiyDuRegCgbDdk8OH9A/p+3XaO9ZFcuxVxDRkgPr3Q6Cy5EJ3FnZul0ZaNxVzOlojeGLK1mvKcWAeJoRpgmn9xfkeVJC1DwZi5TGfXHG3Pp6Q103lOuKer0rtb0zSqmg24r7bUWcM1ptiDmBiJDnhLlOWC4L1tcZ27Sh3rexHkIAcg44nTJO5wmn04R5zmaDshNi9koKh0oKhzj1tTQoiOgMFCQMF4E868xXOW0IItiIUCDg9QZuYgSfiiANUzDkIGmfMv7/wT7+//Tj3fd9P56WxaaoAyBBJfBZM2S1EfdpcV9kKuKp+9G27Z6WvXmFvaIKpvI9LWo6GKwZL9AS3qezW9/nlpw/4Bsjpqi9KJP98YXgVtbu5aI9kqCzWEEPZRI5FCSW7YHRqg9Y0thEOj5h+l8QIEfEZEpxeQIti4YmFnC3/p1lPcxtWL9LVIo4jGFHMRplPiuhIKW92T4W/fEhX/07u54U9HpMy4LlfMbT8zPuTxegVvSXF2jxo9byAkY27ThQBegOSQ2tMCjNoDwhLTSUHcBiUBt21hbvh6PCSdYTMpFOFqBDsJnd/eu2obSOrTb7XYUb5xRxyglYJoWLYzgYaxoN2dhwKp1FI1Bk0zlE0vfWeG/Vq98Y7b2fQ9/pWFV5wHUTRbGA67Jfo
z/lwQY2d2UHz7HiehjPEGcKYtxPH3eg4MQfu6tWmSCQmhVOGfGUEeYMmjOQ0kjgPHj21tBbQ6kNpTRs9tVrU2HoQwDW5EcGmSlaT9auqhGU9JomyINXWxhbVn9oJ4Xs5KKUkyYIKQwPK9AepEgAbsEqBRryam6J4QE8avkCWiZM7y6Yzguevv8zzCd1DMiL7vWQwqj4a9HZo/unK7brHfePV1wJmpzeVh1SX4H1dkeaE+paNbmFVmvTPOF0WXA/z9juM9r1riMStq7F+qmeEHmwHrYwh+rPRQ/ozd78ykOONbj9oEm6USAsTyeEQGg39cFqWxmkG2ZFgkJQF4yUM6bLGWmasfL3QJA6XZ6wLKeRWbXSIWgjSLHsulbK3sF+ADw8NFg9VlL+9168mNZejrv6d057s7dFMDNS7zvbyg4MzUzNM8pmFEbGLBjePdJlvDcKBBJlAw1Iy97PgHOY0aoc3rT9u21OAGjJJvujVS8WpCIpKQJNm5v+nr1P06CzYAFqTa7wjjLrUs5jzmj0Kb7mur39m+Nm8IMvZTVYXKwibvOMWzJNQWhfiVh7jNQ7iBqE1GOnNYAm5W5QmhWOSjosa5fHDsg+NnDvKnnTjN7s0i5KcRbcSkNpHa/rhtKbVuBWaeUY0HKCCDDnjBBUkHdXEN8TlD3A6IEfSJWxKRJg/lB1VF9KMVeG2wGaO8JwtkZ9qLXzbpfQeQ9QYtUQxGf2MCqksC8vG27fg5QPJet63ysnOlRRAkNuCXv/6jDzF2y8wpsNXqUpwtDRLCBV+2rGZO2dd8j2APERFKpziUwWvY4OHQ5Wrx3KXujt7WQaEJcHq1FJWYBSRwH7LN28lqrN8TRXDsGBdk9g8xxTB21NPJfLgvnpjOfPnzCdFpxsQDekfXZRxARvaxvalBBBva+6Pm/mWF0FpVSUrY4enUAG8WGaVZUmzxmUop0ffVTXx2vo1eUjIQZ7RYVDmPq6IGXoyhh10ANUiwJr46c5Q7qyAONkybsVuyp8qwnPNEfkZcH5+RlpWb43Kqm8XBBnbSZy66i1opaOujXTb3NdsMNsRVDDPJ2MJnz9GSvju4osYmi2ARjZWJrsLrGgtWC+Mn1ks9z2KXWjuCg0EwihhgEh1NIGC8yfPyNrzynFccD6plf6qmZ1fbwvsaFWGB3VcGhYv+W0IMYTYnjG9NkzuFbkm06N17WglA2tNdRVs7lW+1BQoMsT0vMz5s/eY3n/jPndBdNpHtRuPxR+swe9/bMF/ZwT5jnjdFKjxT4pEaQH7S1WCzjq39hRqrIgJUTI1JG6IAshLw2SjBpgtGA/pAExt9mOWrSRW1ozDT0dxu4AKgu2UlX6Zi0oXSupZodEIMKcE0prSCmASbDMWan6+jJ7kkH74YhA5gFFZujoc3zqf8RdlOHWVeCXpY92nxOjxdbxowW9jIDl8J0HNAKGXp+w0bNFx7mCWLU9bsieZVMIg+QTkirge//GD0D2SipZT2pOCFMG5aifz9ZrvRdV77iueP34ivW64uXjK9bXFffrirLVMRhMHqRHJWkFjsCYpqpJN2jewKCgq2HkXo1q/NwDkn85xdu1J8mUJ3y+iCiAWMBZ+zw+XKtJMBm8qj8uIkCKSPOM+fmCd1+8x/nzJ3z+Q9+nVPGnk2lvPiq0cGvg1nE5z7ifZyw5gO8rSDpunwStq1r77fWGkCPW24r5Miu8mDPoRHh6f0HdCqQztpcrCgT1VlFKAd2B7XpFygF11fmrkCIox3HQDSFk2ncm2Z/l6/ax+M89Jp9K+onIUwI6q2PDpCxDDgEcAzAlRDohBvWgWy4XPH//F5hOZ9zb90CQ2mpHoD6kOUrpaEVxbhfx3OsPaPZgfaKAt5Rz2b+R3hARBlxs2o3fHJbrHdJtfNCFZXmvho5zBkd9vcBhNM+9QurDWoB3um8MYN6VHBgyGvBsjCdmhaw8247RDkP7jEN8lmBwZUSelQUkvSOcFqRSEbcCWlfEVsH3CaE0UK0QKBsrnc9I5xPSaUFaZsRp0gHQ4PpuX49h/0YPOf5JdAId5garA6PQ5yRXXQAk7JTxxvq7xG8CgpEfyA4gb0JSt1AlohVU6yilDmdloXigo/vwLfb76MoQpJl0bGEwz8TXFcFPLz1AA9lIgDfsg1szg3LSWTQiRA9SgdSBFWKabIRhZ2DrmGEQCmxA3b6bSuH4r0Jj+3pWPUDLauH6g/Y4zEaNoV5T5PDqaFRUdh0AsSpEoSxV3D+QD1oH145y27DdVmyvd6zXFet1VZWC0gZpxTklAThwIGgcmprcK6zuhAUAw5w0wttKNHq8YXwmCzz2tWcuZAQhEwa2itcfeyUZ4FbrFIzIzhqw1IZFP3uelKQwL2oQOtnQ+JARO2wOCdB7PWUVeZ6Ssm8tixCxmTpLFLvJMAnrSAugVPR5mbDY63AJqMLqlbWJQoVTwnZbkeasNj056rWMcVzjvYDa//z4Zh1mfVhRDz/jHnkP1Zo2/NUSZ1mQkyBlYPnsHZanM06ffabO3eV7YE7q9bqhbjDLeEZbVR1bjcF0JoW9gvLD1Kon/Tocrg6TaNQBDI7T81DLdBChrWVAaMPmw+A6nSXg3dl00Gr36uqQvOjLeg/hCLcYyysEqI2A6Rx3UsWDzoLSmtKZaxu048kWu7Ph4qiySAkK5wnL02J2H8BisEspBeu6opWCeLuZTlcFd81Ip/OT6rA9P2E6n5CXWftRTg54eOwKCPup8/YzY69SewPXql5Rtai4LCucENOEHNUunYDBnlQdUQ/G5pxrw7ApR2OWJQQRHSRkHrMoW6motWEt1Xo5UBVsZzbagakKBvo+3arBE8oQAkpnzF2HS2EHJOIe4FyiKhiDU8gOCBuG9nEC8b7ZFlRcF4JSCxAA10sdQVIEHUrF1i8P3FpyuPTUCLJ2sAwBZpEBJxrmiuEKbUEp+tc86fcl28FPqmcnjG5BKpnhZbAKRQRm5aEKBbcPV6zXO+6fbnj58gXrveD2uiqUVbX3OTboSEx8uXjQCSOQwo1MHf606kt1DTEkpYL10kY1ZV/eR3MwUWz/kcqI7AvUghPFON5XsPfJgcc1pLjPLy3nBafLCaezmpROy6Tv25pk5OdKVKo6zRU8J7QpYco6UxUgI6mtB6av97t1HirgdJpRLwt4KzidMqQE3KWjrhW9EF4/TBBhHU8xZmGckl4nZ/Z5XDoEqyPTTxzmG+nPMRXd75k4RLg3A/V5ckYkQZwC5lPEtCScf8dnOD1dcPm+z5HnGbRt+G4e3+gg9e2PV8ypGWTAqlJgA5Wu92W59iHbHvmZQ7ejwav/YoesHBrRAGrRGxcsW+6lom0FHszcZoLbzgRi6xWohImpEvifD8CFZ6FjMwEDIjgGUbHGbq0NpXaUpoftoCpLBHMw+2sgtqi9AMvKYlPm0jzPCDliioTZMrdzqWi94Xxflf66FSUmCCFPM5bLCfPlZJVU2oOTsGFKeIObHgIUfJHv/6fwm4pX3l+veP34EddPn3B7fUGtBSTaKJ6XBdlUNZrpC0pXzCVNC+I0I+ZJGZRGZFAx
XD98BJXMFJNV7WErFdd1G8KgKfHQywMUQlqiQodsemNNi2rkFC3zjYPdpc3p/bPqXqX9YCQHHXVdxCGDBKM0ww4NU0FQWuKoLN8+FOrayQIiO9HAXaDFIEoB8LYnoRJaJoY7ZeRpUpX42WwdLsu4z64hFwIZe5TRRHs3PtMTorLhuDN6bbh9vKLcN7x8+wXbdcX99Y7rxxtKaVi3amQVWIDHgEgdgoxk/V8LNDGlESh3lKKPP2tvCkPRwX9XKyJj4eIxOGnw94Dn18cukVVpIQRI1LTrrdW5YA+M2dTFc7Yq1OneRnB8LE78HOgH9qYlETZUPWxZWMC1g4vOVTl0n8wCZJoCchRENKDdwVXV268Z4FqUsAGgFJ1Ry8uE+Sz7/vWq6lhJjUVm6+r4HccSHAOWdfWYbiaQDKgCPjLyFHB+XjCfJ7z7/neYlhnz05Na/8TvDn/5Rgep621DzRh0aHSdUSI2ajb2TNfOgdEs1IcckrI9oPmdGM1o6OxLB6FtRZ+D9x4SIFaS84EFtN9PgfcUePjxsM1fEHbNPT1kYVCkU3gP79UCnM6A2bBjd8t7Gx6GDkaGsFdz3rROPetms6HDtGSDQwW5dXTuSKdFzSS3irZpbyRGYzWepjEn5ivUr98ROhgQ2LE69E/hAdWavW0rKOuK7X5HWe8o2wbuDdEMDVNS6+1AAUIRHQFMyuKjlEEh6ZyVQw5QCSF9TZMTMkixGzxaurrxei+nAzoQPZ5D+xwsAT1GVaInQofSjvdAFYZiiV4O/Wx0XG/2yT0hIt7JEP474z3LITixzwrhzXWkgVpF2tsFBDoEKV2zDNob13QIVuGoBO7VU9b+0mQzfKaIonRyDbi+H0j6TiSyKkc9phrqWrBdVw1OLzds1w3rdcW2FtTaH2C+EX4d7RCMIeJ9P0SknAeyoNeO0WlP/ogc7vMgdaTMH2A++F73G+LxUfZrM9bxXmn4e/J/Ny3pYyvPqrgdciSrKo6Fip8D0tXvjI1d6ufFUP8whwUI1DnY1oOvFf9YMQABHQEd6AVcC7h3lJsmFtPLCdNpAYWA5XICRMW4QSr75UPbmnAePtThsz8QJ8h+xi6A2Po9IkdiELj2qwnLZcLpvco0zc9PmOaMtMx6n8p3F36+0UHqO7c7prhnmm5+5orRwO75RIQhJrsfKFZFMYaOGx0vPhzvF6AeJG5qQzVqKexnvQnvv6cHgWWAMaD3jtoabvdVZ0NqHQs/WxUw5YScE3JKmKY84ESxOZLedMaCa9MMyzIXX8DVAke1+ZteG8qqcxTXj1f92UigWfXhJhu4DCnaxxCcumVEtVuQ0g0UTdg2Tdks4LtuIOj1BZEyu46Kyr6ov+ahgbNivd1xv15xffmE++sLttsVrW4IcUZIk86/5QkUIig0gBKk6rxYpwmdIroE9C6g2lHXO6gCIGP2dUFbm2qLtYqtNdxrw632MV+UBEidMTFjThExBL0+KWDJCecpK5OOgDxFzNOE9+cZ55wxBbWaVye+XbMvsLIzR6EgMDFQAYc+mGwj8ViL+jStmxpRVnVK9ioj2PVMAaqlB9ZhyUMQithTLLGxCx59M6vUx7zejGmesJzPmE6zKm0/nXTm6emEfJoQLVAFP8jIkQEep3gMSsGv1xXbbcX95Y5P3/qoZIkvX4fFw7q5QSBG/4+dAiY2+2yfkSiodFLOyCliMq+lQEqeESZUcmakjH6kJjMHVQV/jErWEwbLIHnvtIxuF2nvza/73qOU/e/8aXFAyhxKtUDiz+ljKCqrwWj3DW2ruH56xe3TFa+fbrjdK0rtAGmSCgSkmBFJ+6rStM8nXSCkQ93SG7gW9HJH366Q9RUoxfQQgcodryFBmHC/ripMeznh+f0TlqEIMhtj16F7D+rYob8RtBwCxKioxMZC+lpRV9UZbV3X5fR0Rp4z3n3+hMv7M5aLit6qdcp+/n43j290kKrNwoitFM8k9TuN8tjhkQEleFYEvMWhrI3ildChWQgGOFjGqNBe6N7sdIaV/SiRNUxZ53UsQG214r5u2Kr+GVBYb0o6T7H0jLkzelbI0WccuLVB3BDT49sz8P1jHA+9UUEVZdBs96QW6nPC9HxGnLSqivADyGm1QZUfIiOEPlhlI0MlaCa7qVOuwkpkM1TToCFTCjsE+GZzH1omkMN7fqgwvJoVQ0NYCROVgc3ykh4EaAKKHdlIB3oAqoWBv/feYAzLPu6pkw80vghAgsCMxAq3ZMV7kKNCfB0CCSo/M00Zp5QwxYholfuQLRJ/38E58JaJ2mclAvWdHOL3bPQ0fWhX9rr+OINGtsbj3rwZ2fqAHAVqt8ICCjJsHIIdSDEn5EXhvXyahlRXWrJVUWYdk10ZHQe4UIOkQ22OHpTbplXT6x3luqLcVvWbKs0IR3KooPZ+0OM9H9tHg43tW/ih6NfqUHmAXM7KEQkfvzgEJE9UIRDRw1Y6YHjweFE/PEcl2x2S0+pn+HfJvjaP14D95w2u1UliKGxnrtLFBvbvL3fcX5TlWLeGVk0M2ZRX1FImHchJvhlg76NrP7dVcK8QrgiivetsCUtgVpeB+4bbpxvY1HBaVeHpubYx9xgs+VSiTNirK7vtfkyKXTKXdOq1Geqi4tgCgEymTe1CFkzLjJQnVZMBxhnG7XvA9LC0BkYY0T9AF5rSVZUynMWhkX3RaxDbN70VIPAS9lhJwQ8W0SyGG0ztQRvbAgxqqsNcIUZEK5EhWmGV1nBfV7zc7riXglvxIBUwpYwcI07ThGXqOM3anFX7CNrZhE0DA5gNxlIzPFM2OlSGagvQKgAS9N7ABDRhcCTM784IU8KJ9TnSWJQw6ro+R0wOO+k/KaqvOnJ93dC3TTN+qOhrupyRT4smBkbOOEI5wP7/OPyvXfpREXiPwfa3zjGBsXbBvQnWpodbkq6Gh6EjbRWtAWHrYC5gvVFwNQppxkSw+yqGsbNliMJqr9KZVUzalARSSkpgMHwtJa12Tzkp5Zmtimqmr3fMnDlY9iv7ZxXR9xn8gIZV4rvqgh+IRysOz85jMHLHuI4WwGgPUgrJEYjEMnsAdKii5hnz+WT9AZ3nSVPGdJ61J2X/P9yhbZN4hRygAqtsSVCvFfeXG9aXO24fXnH/dEO9F7RVDy6uBvGxMxT39erXSw6fxtGOQUe3GUcfyudBYnHih5NnwlDH8L2g/qN7UAEU9Xh4Tb94Ds0523aweVkTxcOw8UgIu1la1K4SR82TyTgqQ+kdXBrqfcP6clMq/rdfcXu54frxivW+oZYGcv+zEDBNaj8TUxpC1c5WVcscDVC9FnCrgDS1xwjAYn2xAAKXioKAV/qEcptQ7ytOT2dM84Tzu4K0zMinWV1/c0ISGBRsxJGH9ME3ra75XprC9bcV5b6hVlUWiVPSHvZpwvJ0UrHhKZsepYCr8Qa27wEK+uj9sFj1ovCe5TCamQS3facxFzWHMGYrHqb8/XmdyXOsABxr1he2uU1r4spoUZviNgyb1l3YSsW2rrjdbni533AvFdficF/AnDtyiGMGprNWg1OKmGPQrOmQrauRts5
59WA+RbIHYNUeY3A18dtIKmjKDZ2A+f0FlCMupSHEpJUS7Rk7CEZVJVCQMccCEfStoq8bti+/g3q9ot3uEAqI84zli8+B9+8AMbpscMmZr8IkMQZwTphOJyyXC07v3mH9dAVJQMSKFBKmpHAEg3BrjGtpeN0q7lXtqychcIyQGJF7RwIQTVWbuSr8a5l2jIRsQq5VCLlq1uf6Y3R4bwRdGzkETEfVBe8dhoAsUAdmly06MtUss1YNC6vaYHRxERUiDcoMc70+0E57dnanWMUvow4AYEjB8Yp6/2NUOtjhpyCqWkJR9fWWywnL5YTLF+8wzTPOl5NWVzaUqgc+mR2JHoj7mgjjQnFXnbn15Ya6FiVJ3FYNULcNvTRVg7frG9wx92Ep7HQGtRr3oVy9atwaKncUwpCy6oCSTUJEiMAUAuAMQKd7+wXacwBA9gqsdwy2JKwVkFO088Eq3c4Kqw89wTZ6yk7KotohW0W6bbi93EEp4fR8h3TtYYo54PZSUe8F6+uKl1//hNvrHR9/7QO2+4btekfbVHHD9fhSTjg/X7CcFywn80CLBOGmQ+fbHW3VL24F0ptew5wwhYinpyek5YR4eUJLGRwCemnYakO93bB+ekWcMs4vT8inBfPzBfPTRX3XzqfdhDXnQUTxvSGWSLWbwrvltuL28QVla2ilaiI0J5yeT5iN5RhIk7Vm/lRc9PO29XsgSB0PF0dP9nTIIZA9QKWwD/EeB/8GvHSYldiJFI+5BB3+fTQbWIYy8z7hjZEJuyRMrUq9La2h9q6ZE+nBigjk1tFiR+sNrSc1g4Mc5od2CvE+ab8HWGcuDhhwsAvVpE0igbaCshWVZ2m7/puVModPqTpbsNeD9TakNXApqNcrysdPKNcbiALisiBNM9Ks2bj0aWTwGEH8cB0NnknTpP2R0wnz+QxpgtAJkSJS1Il6loCGhsKCreuXQICoRngNQCcfLXCtvwAIWyM6IEhApIicGLlrA1nhPn6Ag4+VnCY3JvwaCMN+nAgRMlh4PiO39zEAMJsRngYppYyrzbeEjhCCsgUjDmww2kVA34RNP919xumQT+wBypmEhBGg2DJjShF5mcxo86SVlMF9O13bVg8LuDEo7APsngT683sVVe4F5b7ZgWWak7Xtnlpia5IEQciVlR7XgbEjI4myJUmrdhaGMGlwEkZhHrJIMdketuvrH30IJHvp7lWs7Crx1aSw2mDVmUJJ0DMimCju0Ed0YgC75YoxhxsDtWPbKrZ7QZo3lLsSq2JOSF2Tm741tLWqUvh1xWpzY3UtI0A5gSrnhDxPasthOqEuoaYuvl1dq+2rVx2IJgAxRKSUtQqbZ6RlRg0RDQSuKpJcekMvBdH60FNjMAVIiMhdQCEO8QMBDcYrQY9Hhy3bVlDvK8r1jnJbVZCgsSY5Magquw0zOzzYqyYu3YKUimf/1o9vdJCagupn2XzdaKAGW7xuODaHgNmgs0hAPvzM24LWeyCOROz/Rm9+EsChmeqHRYwWPACgdzB3tHVVJYdtgzS1J3Abj0cFbH1Op9gyBA1hwAveh0oGvfhzeJYZ7IDNRGPOR3Ff/XcGQZJukLTMQxfMyRFvP91eWVlQYEZfV9TXV9x//VtYv/wS5eNHUNCB3wAgRmVhpZwAI2UMqq+9iFakhBgTTk8XFU+tjMiE7emG+vHVgjGhcsDWBcwrWmBsqNgsmqQUIVMGLbNCjSlgigJhNVYLohJJMSQEVuX7i2QgVlQmpFLQDNpRAYWAJSVMMahTbAyW4RvrCrTDli7lM5pmY0WYykkHE6NAZYwKq0pEs4QmxoB5nnBeJsxTxmTVTkwJnJpm8mR9Tewvdbg7AxKDKfTD5H5gFbXYoRyzai1ePn/G5bNnnN9d8PT+SVW1UzTxWoO30FXRwytnNuqQV1JBK7peO8pWBt38+p0XdOtNaAWlkLUb4GQihTgZo7okO3wj0aikpqg+ZkQAS0fvgpvpKd6NYStEmCfGlJMOPavyLMyZzQopp+A70gGdnexs0lfqU8V2DeecsKSEJUVMRGOmzOFzN63cGqPYIHfvBYEZGwgcItbSEFMYVhrZ+jxtrVhvK14/vOLjr33E7fWG64dX7c10RjTpJJU6mjCfZjw9n9Q7ykxRhRnb7YZeC24fP+DTt7+N1y+/xHZ9RVvvgOi8X8qzkmEuFyzvnzWBE8HLiyXHt6v6cgW1r5+2iiYAIyKdtFJNU0MqDVPtSuia0kje+rahbQW373zA/eML1o+vuH64oTOApGLWERNy0DMITSXIuDHqTUWzxc6k7Vrw3Ty+2UHK6MDKvKLR5AvkMilADurnkmOw4GRySHhzKI9C6jHj32f4j3+5Y9xvq60BtbD3FzqkNTXxE3UChtHMFTqKmIzdN6eon8lUFmAbxJvU3k8ZZBAAKThUvYtrpnCgI5O9XYML2TXUSrMhwW7wpr3GgIzo4VQUw6G5NPBWwdsGvq/g+wqAVF/v5RX16YI4TWjzPKR1ujedxZhYlrVDBDEoBHc6n9Cfn5ApoIzGhYA6wJWR1o40MVLtmMzO43RecHo64fR0xun5jClHqFGsWYD3ps8hZKohhGkmdBBOVd1gW1CtxwCtmvwepHCAggkP64Ueo4Xdd58jMm09CJoAq2hg2jqjWgbOUJNDIdVvCzEiH4RcXS1hsNJo/zPEh6Vl/KyTVUJKCFMcOnsuCRasOX56OuF0OVmfYFJmnsHLWhnVQTbx+8WmaOJBBUGDRLef3yuCajYUHa5evlf4KhKr0KUllfCq0QxlLUjta9c8pqCVz9YZ99bGAHMDUA3KTCbiG5MmiAgEsQ7/0O9ks6TvrLNyrWOtplhPpMQnS6ZCjCOBdWqF+1lVBgqr7mJnBjWV6QrXFR3AfMqoRRPR2QRme21YbxvWlzvW613dDrYygngMOnM3paTEnBSHdBsZPtkrofGGWja8mjno9dMLttsdzRwTJCkxhIU0GXB6vggCd1DvQK/2OQNEXPxV1VGiQacGwthYDca9JBE0c1C4f/iI+4dPuH98xfqyqv/cfEJfMnjO6LUjIKBJHT27ci+KODQGSFRM97t4fKOD1ClpkCqGVXtWFIIaG2arpKZolZRXWsDDJnqopPwvHkSs3v5ZdhhEHgNUEFtYrdvgaYPUCrKeyRL0fczmneSOrNGz96QLNpgvePdmvy8U++/oo1ksSe6yCd3sRGpxAKZd5brrUGArih83U+oYcxr+OYhG1TOOYxvY41KVtXW3IHW72/tk1E8vKOczQlLFAvcUcoFX7jyGM/OkzKUUApAn4HIG3lW0lFBA4KqNeakMSR15ZUwdmJuy6WKKeHo+4+n9BU/PF1zeP+kGj1HpuWxUfWvcc2WkLphVeA69CzIBrQawY/rWg0ohaJUegprbWWZvR+tjwuJ9I+tvaOYOVFZ46t40ON3dAt2qrpiiUdoTUuoQJAxWaDAZnwCAD0xUvzf23ftXMWeDTdUmA6bO7/5MwSCk0/MFp3dnnJ/PmE8TSKBVk7PAbqtaztRdUbubWSgL77YWIWhPqlTtqZRqWnIdaAo9DcIMPJ
kyCaK4B90h/hr12qdAdiDJSFKYgI0Za++4lqaivAAKC+auMFm2XlTuqo2o1ZvpDIrBc11n5LbGuNWGtXbcSxu93OasPNHlqNWojELZ7es3ATbRIFW7zoxtUsCBsNWGFAjlukK2itNpRkoR3BnbWnD7dFOo76byUCmQ9gJtIHielJQzpYhkt9/nMXtt2OqG7X7Dy8dP+PThI64fPuL+egWXBqoMzpqI2W1QOTZLBkNvoF6BVgwmCiCom3lKB3gu6toTC1JMWmErtN1RXl5Rbzdcv/0d3L7zEfePn7DdChAS8oUxLRP6PKOXqpWowXu9dpT7poP4rK2Etn0PwH3nnDCniCZBG9Pk+LceMokIc4xaepL5zMAPYz+B39RC4xDAQ5vG+0c4HBKKJQMYAUSzHumaSffe0HtXajMIS4yIFLTqC+lhqt6HEfWA8sYuMPARyDgso8Fl5H1s2T8KQYbZInlfzeALsayMi84N9aIeUb13RPmapaBlAbg09LWi3zf0ewWvDejaDA9CqKWh84b2nY9oIWHd1IZB7Uwiaqto5hUUY8CUJyynBclmwtjYcRHKpIzQzQ/WwD4FwmXJAMwDDEDKEe8/e8LTZ094fnfB87unQdl1llxdN9scBYEY0hiLaPVKQpgjqcp0bUPgNHmSE8Oe0BzWFejQB8JB548IgFaMDeYM3UXdllm/u0yXWAKVRQeJOejhSg4FkAt9+v9DyRRivS8LjDFHhJSwPJ0xnbTXNJ11UJKFbR6vAaYOsZxmg5S0zyFGm66risDePl6VGFB2a5PaGtyYUIwIQ6bePcz7mmpnUuehp3jYUSOhYkv+2LeeJ1Pmcjv6w2LzS2RVG7RqWrsG/iai3025Y0oK8M2JIIa1C3e4yogGKcFaGaUJNgtSOtBtAZ8CUldLmM7uXSWjB1U6UJhQEdAC7cxegooE3wruWwW3hvOccX+54jJPyNb7qbXhft9w/fSKbSsa1FNEkAT0jMABmTtiJ1AFpJiPVYvgqL2iVhq2reJ2XXG73vH6csPttgKNEXvAGjuAhtdPK3IBStvJW9v9hlI29NaAFEE5YDpPOD0teHp/xuXzJ+RlQVhmq6YI7umFZgnftmH98ILt9QXXX/sW7h8/Yn15QWusM405g1s19iGjo4NJ0O7aeyu3bfTVQYSyfQ/AfckbnbAG9QggAZk0SDm7b2+Mfw18d/gbevMPe5Dae0jAzuZzLu1OVhi61WNmwo3YkpjlAREoJpvriIfej4ZQAQ7MRX1+iDGf4N4wVjXaWxfx3/RaS8ZhdqwalT5v7KV+dEE9fGiHMw324dJG87dbg1RP0QCEiC5V53vWAr7eUUOC5N1zq9Rq5JGOGAPaNAGG2cs8aeJsg4HmWGkzKg3EhCAaqHpSRiAISDngPEWccsCcA6ZJGWoUInogUAs2zU8IiRFEmXaRVY+t5wRiVoYk9L4Ri60VV/1wRuebtTMupoF8IezzUA5p+ZcY7HZo3I8g5DAe7a/l9/5NA8oLeNDx54P2/PKUMVkvQ23CA4TV7gEUwBBLhnwf7NChmJllKxVl29BLN3sID1I6MN5EhzSFVEXcf997DHLo0T3gDh7Mx7rUz8FgPeFJD1ImmJbrzmMcUAG5l9RuU0Kkbtutq/KKO3EHu7bMHSy7woV+YZAe2EgsbID+sQ+N8efdyqSLi/ru99jGxdTLqXWEDtwgkNYQuUPmMiS91EBTg1OrVZNXaIKiaz4oPN0AIQFXAgdTx0+EwPuohM9AdqPIS2cQKzrQuqhPFyo4bIrIkOgsU9cB9jHUPWv1PZ1mzKdJ7efnSa+10HhuZp1sU9v7Ct4K+lbQi6pc6MWIIJJB/hlQNasaT6/dfMPsnAyE9r1AnABsFipoJhTt4hAIkawpe+jV6NF9hGss0Ow4l/3U/r8+TOcDgxR3lQnubYjKKoeChyAtACMMKOasBwosG9WBWT9wvMHdfeMwzCvoWOcREgxzByHan/cjVBfjOCTEP61DLrtWoXPWufpwsOwEANPaGcGpNKyfriivd9TXFfXTqpVUnCHTGbIw1k1QOmN7WZH6J8TrhsutqPBoCKhmA9JrQYoBc57wdDlhmibM59NY1LwVSKnoL6+mqlEhlBAQcYLCInMOgMEUZ65YesHUEiJXRNEKFUlNMCMrvpSEQKEhxA5CQw866p0D0KuynzRwt3G96LgOsCcpw3J8LCEaczyjPAiiXzYaMRIKK8uizVpNc8Y0KeU4BDKE6jDWwAdYeSzMPUjFnFSB23pz5+czpvOijLLWx7xQaQ0QHjNNXJNKhrWOuqlSwHq7Y329q6NA6eNwrq2OnlonUpuZlHVNEYF6AzEjdEYU0Wr4+HZtIw1bGlFLlGrwGUh9ugYsb6iCogU6OhBSRGDzemKFoJoIoggaBE1MTNpgfxJBaxuYO0qrttwJjaP2a+BSR4DPWsVsAsVDtdwGwbs6DjSveskEYqEJYrfqtrl2ZynYIqFdA+4pIsed3NQ7a8JmQ/AhBnTpaIkQe0VBR0yEkAJamxDrBMpAjiekRKPSzyEiUUJEBDhCWCW8aidIEch1Q9g64toQgybQIhUSBBwjpnlBvJywvH/G+bN3Rqh5Mr3GeRwFXM177a6O1aEEFEsSYu9IEPQYIDEiLjOW92csz/qVTzMIhL7pIHer3USrLZkhwvZ/qpL6L//lv+Af/sN/iF/8xV/EL//yL+Pf/bt/hz/9p//0+HcRwd/5O38H/+Jf/At8+PABf+gP/SH8s3/2z/CjP/qj42e+/PJL/NW/+lfxH/7Df0AIAX/uz/05/JN/8k/w9PT023ovW++IoWNCRAqi/QMymMaC1DhsjlDdMTDZ4wHKw/5nb/LHpJmoKiProd5Mb02IADkeVPacIVpWeJiAdy2xGMfPaaaNMX/hNgye2Tlxl6wMD4cjdOw1vfh+E/Zv4p94r6zcpdU7pH4YunkfW7VVN53vuH28YXvRL76tQKsgiuh5Bp+0cdxM9LazIFVGWitCaAgASikm6VTQCOC0AtuGnDPW682M8yJCN8ioNYVrsAeHxAQmRgKjt6ISM9eCRhVFKlJO4HlBOrvUqt4zp1T7xZJu16YLpJvsTFRdOmZLjQV7wD8EiT1QHYjUx6TfWIKBAmIQpCDIat+q+oCkUJ8aPWacpowpKRztsjluXufEBXlzT48L9sgOHbpxh7Uw9BFtJi/kDWlSPbyhIWd2EG4LMWzcWQkHrXbtr3VGhVL90WQQSiKzEoI8UJM87B+x98IsRjxgbKxsxyba2BdRKbMogCTLwgNGsMopYWLBNGU0qzxC0H50MpFbHTzW9yRm966kCa2mIZq4SiDkBNU1hP5/CAHLnDGbJqPjGT6a8ebKj3suXl0cySYQsJAxOhls5BAnn3iLQOXbGOgNXDZ0aShooESgRIgyI3IFnRIkkym4sLF3AzJF5JCQY1LFFFY+r0A9ygiMjmYwqti1UV+pdL5gupwxPz1hsgH8OKmZaUhJTyyBUtNtT4bWEGrGtCyQWnF6fkZIAWmeIFNGXBacv3iP82dPWJ7PmJYJwgBXP4v2CtAh61b/D
1l1XK9X/N7f+3vxl/7SX8Kf/bN/9iv//g/+wT/Az/7sz+Jf/at/hR/5kR/B3/7bfxt//I//cfyP//E/sCwLAODP//k/j1/+5V/Gf/yP/xG1VvzFv/gX8VM/9VP4t//23/623staGZE6IgWdsYAP7gY92L+K6Y1gZY7U+yEzoJzD4R920cqUlFEWzQnzQYmZCAdUTmubA5wT/Y8gg0xIB13hvSceMj3NNjPDGswH2Kn7Ihx1oR0ExpyCH7LAYwZun8t7HsHrLw9ijk9ZBSVWHdb7hu224fXjFevHV2wvN6DokGxOAX1awBS0XxUKKm8KEzRGXOtgGNZtQ2sVfVs1+wxAn+5KGJnVlC3kZCMFpPMaBm3RIbNO3a5V3SCsvZPSN1DbkHJGOjdQzEDKQExDRHO/9freAL3Y0qP27aIxomi/fh6oXGV7JDx2Lwbm5+tHlCUlonp2iQUcRFl7xJCg91MC6XxSTjjNGVPUIVLqpgzvKgfsw6fG8NSbOj7HUPS3QDUsIWwRuq7aYO0BQFDCRjT1a68Adt8iU0uwPhSz+hpVZmytowh05ibweP0EtXDXdQiDtg9bSw6MR7aK2746dvgviSIFPARkFCIXAFNKaALMM2sV3PtQH89TVvKJN/1hbE7siZeM6xWQEDAJQYghQZGDGAPOc8ZihJnQGWSstq/h/ts+l8EadFhfbC5RLNGsRvwYj6C9M1eKCcLKAq4drZNujEiQCEQuiH1GOGU1T8x635whnEPEFDJymBAigykCFCGiJBBPQH1t6JhLwjTPyJcLpucnLM/PmC8XTKcFyaxZKO1DORwY0gMCqyJKaA3LeQFxR33/DnGZkMsZmC1Ifd/nOL1/p4PBeUKvjLCqCoVXkmpG20AEZUF+F4/fdpD6iZ/4CfzET/zE1/6biOAf/+N/jL/1t/4W/tSf+lMAgH/9r/81fvAHfxD//t//e/zkT/4k/uf//J/4uZ/7OfzX//pf8Qf+wB8AAPzTf/pP8Sf/5J/EP/pH/wg//MM//F2/l2tVSRxno8yR9QAO+2DrMVCNqsMhGPE8iR7+Phh5gXx+yXxj3AJeAGVNkQNqsKa6aep538CgIY+HvmHFoEF9Hhk25tW8krofmNBfHFuE7CDA/qXqGodoi8cA5c+jkitql62whmLlzsDxeSoC0GtDLxXXD6+4v9zw5a98C/ePV9xfbkoTThFPzydQzKCcMQmBSgXmVRMFANnFMbuqNKvHaQK6YtN1LRAwEGQYMp7mE3LOeDqdkKcJ0zwjTieFZKqAb3dQ62itoJcVUu9o64R6mwBKmJ4KKMyIpxPCYjpkZI6kotm/GGyFJqpvR0pJf+yk7NeRaKf/DxwE8shJt8WlEJhKcuUAPSxjQA+ECfssU560ob6EgMyMUNnYjA1ig7DcvQr4mkPSOyaiB2QrFXGNKDGCqx79ZStY7xvW24r1vkIgKmoMDOYcoF5sw3X64XPuqv6ts/ZUGKhC2sj3Cpf2CnGyQBlsDfjlYcDchLWP5BY6Ooahyh7JiE4JZPNOmkwlIpwtw8c04UkUegs2VnKaEp6mhCUGLBFQvbKKJJNpUvrEYADCBEFASqQzawJQVmbtOSVkYWQWhN4VGWHWCg9Atg/TBEbKgPUx9SuLNhImEtPOO6woS3bJ5u4CBKEVBOmIXMFbRyVBLQKOARIDIp2RpSGuJ8R5QponUOTBFI4iSAByVN80pgiEDAlJpeLE2ggOZy4L4mXB8sUznr7/+/D0+TOevu9zg4jPqq2XdgsegWkZBkJA1uI2qpt4fr4gXk6aePYGmtSZeXr3jGk5Ic0zpAFAdYjKEmAZSbDAZzh/68f/1p7UL/3SL+FXfuVX8OM//uPj796/f48f+7Efw8///M/jJ3/yJ/HzP//z+Oyzz0aAAoAf//EfRwgBv/ALv4A/82f+zFeed9s2bAeDrE+fPgEwWijvEidieJv7wfj18e0yelPkzdnHLNurH/IMlWiIax7tFx5/E6PhfYQ49Of2bFvgB4D+hTdd/aDZM+c93ByfE8DodzHtLrUahA5B6e3FswrqGHSHgoItHDZquvamlO7atjqUBNbrivttxXpbkWMATwmNZ9UVi2qhrdAmDXvyKMp+7AiQyCqLE5uej2ZR0LmBpYEiIVZl3DELlnnRmY1pQlj0uVNUXyxyxXYWFbckACRo24aQJ9USzBOoM5TVbTYTBnHFpMGTo4n3HiutkXTsFQvsmjt8tq+XXdPu4eLLLvWjPQSXqyJzDA7IVjFGEX2fNkjr2oxDvUL21357T4ewKas8UTBJGrYDYPcEq9qHEk1w2lZRc0VbmsrVsIynp+Pnsdffvco8tmn14EgD26902l2C2QFqX+OQsV79dVzdwuWnkmX7w+ZiXGetTCdAA4kBBiHqfNScoo6iBEKEXksWHe+AADY4ZfcrQuA9ZoXUQ06qeh8DYgeizQ75fnXwWK3rNTBY3aj3z9eKvW/vG7siGBF0SDtq3yuRDN1N6h0kTftFDlEiQiiBvL99vCl+Tw7qJsGqcyCOfrdX7OJyUSkiTBPSsmC6nLE8XbBczphPC/Ks+/jYI8d4RdJFbMKzEEE+LwgpQgKp3iR3wIxG0/msmo8xoYkKVL+1RsLxHOQ36/o3ePxvDVK/8iu/AgD4wR/8wYe//8Ef/MHxb7/yK7+CH/iBH3h8Eynhiy++GD/z9vH3//7fx9/9u3/3K3/vasosZL3/w4aDzQkBI/iEhw3g0M5eqrz13dHlQY8XmvfDQfhRRXnvTxmOzTw2qUMPff+NnSxhQWqwzeGrWw8jPvxZgtK+9+bvoaby/7eGuX8mkJEsUkKMZg5IykiT2tDvG0prekhbZl7Xgtu3X3D9dMX1Oy+4vd6wXu/IOWHuqm68RIVZLssZmAVYzvukvkGGrTTtXbWGlhJqLdjud7SVwdytecoA6VzGtFTk0wmEM5Z5Rn53QUgZYevoREitAx8ncNBJeWmqy7HdCyRuiLcNU8hAMHfeqJUwslkRsK4Lrh3NyDB7Ex0aSN4EBbHKgkWM9BLGjA8FGv+uCRJMkkiP60w7muoK3YlMU6+p5h/7DJpBdNIOattfqaJgrwVlS4ngfg1qhLkWEwUFWlEZrlYqalW4j3vHZihBnvKA/QhQyDxEILANwyqN/SFwjZ2jRCVXzxfS4dpAhEYEDkHhTdhH6BbKKCgUKHrweHBaos63+djI2Ge23qeckFPEeZ5AUwaZHXogKFnDDn30Buk6mEtxAhMjB4vOmkJB2Y56z5kIaVJEIQFD/bw7lc96ZQAwBU28AgQFOo8aSfc5GzypSUlEIKWwB5O7SsuElKNS/6WCuKHLFeAC5hu4F0VWYgTiBIoJaZ6RF5UKcziOWEBU9x4yoIQsEBAzJGYwJSUP2TxbmNVyZf7sHc6fPeHdD3yB9z/wBS7vL7i8f0ae826/c+y1Yk/aKGqflZLCjtwZ6f3T3jP1/ZDSSOy4FVDoA0k67o2xML7LxzeC3fc3/+bfxF//6399/P+nT5/wu37X7zJdtag0c28eH+AwrWUcyvMkgXZLAzv4MX7H84fHXeku
u0SaER4by26/4EFjDHV6liAw1eYjU2v/N4hZPYwdre/Aaa4qqGkHaYq6FYI2QXUdhkGZFtjCOvQuxuckU2ePUa2wW0e7bSiW0Qybc+YB99Xbir5ukGoMLuAwy7Xj/K4fSCEOLy9uDEmCGDs4Z1U9TgG5FDPQUxkn7ptlZA2tNoACSqmYTRaKormdIu7agPMJsTNa3cwDJ4IR0TmgV1Zqa+uIXQ/cQFrBBChTTLqqCowAZdeczJ58ZJSH/tSRWCLCCGIH9GgX7T0Jr6G9mvLKegi+stcfMtiV3Ziivel8jwcoAg73017L7hNMzbuY222rfSiVe3+pm4I+oBWIVsn6JcmIC/0x4TqOW8QQkASIgZE8AR6BOuyjHdb7oKhNfu9ZqfhzUH1KlofKNRkjdzqowXhPTYV4AWGyGTZRo79IQ0ZojFYYJZa9Eu0qtx6MMDHSTXvtaIFVCAheFRoMBbORgN0DghE4BCAS3XvwXpwGcgme2FgyaJViMKfe6emEPCfMSwL1FWgbarHbV9lECAJomhBOZ6TLEy6ff47p6YKnLz5XlZDTos7jgFniTIjThDirJqLEWYNUSAiUNOCkiHRSO/vLF+9wef+Ep8+ecXq+YLmop5SjIW/9t8ZRRRaErO/NAKIppIuvzWNybFCy7kt61KP048iD2wGZ+M0e/1uD1A/90A8BAH71V38Vv/N3/s7x97/6q7+K3/f7ft/4mV/7tV97+L3WGr788svx+28f8zxjnuev/L0Hp2iL3XX76M1BQYCdFIdKijCa88fH20AlFvlH8xrywL5yXTAtzMi+W0UnewWlQWp/jvHND6Px5ZWYi5PqxnZFbD3sjN9HdAjMOyTglUA4fFhXjo5EQxGj3TZllDUeCtIiYnYDDe22oa91TJwHckXxXRVBG9zxQBvWzyWBDV6TYS/SU0AtGSCAe0UQQS0rIAXcm9rDE6FU/bNWLmRYOZkJ34w4z4i9I2znATsJlF7cmkrVUNVEgniXGRISg8TEFL19pRzuPx0qZ/Jk4w3LzmEwz378OY6Vz6joD88LsSBlMJHj87yz+lymyhVGPPEY1bm9jgBDHJih0jMU2whS43m832SHcjfFkVpUny8QDVfpY3brcLeSklQRItq67nYoBR9upsNhZKrxvi7F5s/0IOYHOMk1NCcnxxw/nyiyoIuSERC1+iKXNbO9xrxft4PVCRi2l7zKoYd7O+6X9xhNpuFIgjjeP5Uo0vMjkHIcEmnFeewbeuVJMSCaT9PydMa0JCznDBRAigDXAGpKknBsMC5Ganj3Dk+ff4b5+Um/n9Sgst9XCDPiNCFNGqTSycDltEDiBAkJPUQgRFBOyJcF02nG0+fvcHl/wfn9E05PJ8znxSqouLNVLYmX4wf3DN7Oz2BJcMxpVHP7crdRDrBVcvu60MrAz1QZ9/i7efxvDVI/8iM/gh/6oR/Cf/pP/2kEpU+fPuEXfuEX8Ff+yl8BAPzBP/gH8eHDB/ziL/4ifv/v//0AgP/8n/8zmBk/9mM/9tt6vRR0Riobzu+afADU3gIGTQQ3DtOsjt6UtY8Pz3v1O4sNHfL+718JUIAGwOCZo/7+6CFZA1zAI4CIZYwadETZRD6v9PbeWXANohldgmudeZC2gBS8KtgD9K75tx+SvFYV2uwMMhM8NzsLybJCYfBWgdYwBUKYEnIKYwbi8v4J06yOrjqTJiCTPPEGCIVdwYEgkDqhb0Vp1wSUnBG4oWx3rKYMQL2DygbUAqkFxKolruKZCXmeMZ0uFpTCqFZoOgFhQhftLXATpZsng/Ds9OAuoGaNW9mVrt36msjuh6m3w6+h3UuBC4/aGOghUGFAyJZYOmxr/+YMy3B4vqGubdJRzbTyvhs4xA0nIU2pzm1PuLwaG5CMVYytNlCsKGvRyguCel/R1s2kjbSaEFtHMRAyAk6IANmck0FlHkBDULXsFANyVmq4WuFAq6eubEBmQeK9QnEXAp9ptBIKQ5XfT0pmlRgrAV0YUuII3MzqTeTf3ZiQDJzY74GxWUF7IBeBiEKmrr7BRiTx93jEVQZKE0x93QLUSEJ9r8YImhKmywnpNOPdD3yG+ZxxukyQ7YS+3ZFQUG4LynnWcyolzO8/w+n9e5zef453P/gDmJ8uuHz+GZLNcJXXG8I04XLb0CkC84K5scJ60wlIExCTVboapObLgmmZ8P773+N0WfD0uQapacpjjvFw0Dyur+M98H/2YCMjnI1E3pOngePQ3oclYza6eomiB7/lEgfwfyNIvb6+4n/9r/81/v+XfumX8N//+3/HF198gd/9u383/tpf+2v4e3/v7+FHf/RHBwX9h3/4h8cs1e/5Pb8Hf+JP/An85b/8l/HP//k/R60VP/3TP42f/Mmf/G0x+4AdTjkMOEPnXfbmNWDSNiYr44re9OYM8MRYbOMcswk/pEZ4OWYCst+Ww7ONUljpqD7zwqbufXw+PFpPC4Ydh1fI2oQ21XaLRS5A6TRkPYQV8CfRIOkU2vFqVmJLa+M9UW3oOWnWlyIyskc3zWBFEMCIJAgR5kyrg6h5UWM2sqpUqmWzYIhXe8EdfWl8Vq4T8pSBnjGnBLSITgEkTRlTvapjYzPLh94B658Eq6xiykjTvAf6qH0owIeWZdzEsVlA46TZKcS7Th0bpCm0X3tfG379/DDd76AelgPO8N+zhGFXVNzv02NlJCPhGR5Hx8z8uLrsIz2sOJvncmWT8YYPh+dxkftcTzf1d4j6nbVaVevNDhB/z8HWXCRNCEeVTyYYYXDqru7isLsTkbwqM/arf36SoX5BlkBZKjM+9J4q2nvv3WaNdhhUxKpR+2z+mTypFLtGChgavMkyKqkx3+SJypsMn2zvHf/f1/tIRo/JarTezTwhnxdM5wXL8xnzOWO5zJCZ0LeIvr1HmDIoJz1zUsLy+ec4v3uP82ef4fz5e7WveToP5ELhW1aF89bBFDAxAyEiLgvIg5SbQeaE6TybweFZK7Jl0uFxYyt7K+Dh4eff/oeR5DzclOOxR1b5jsV5WIN8SAZ9/u/Nr/9mj992kPpv/+2/4Y/+0T86/t97RX/hL/wF/Mt/+S/xN/7G38D1esVP/dRP4cOHD/jDf/gP4+d+7ufGjBQA/Jt/82/w0z/90/hjf+yPjWHen/3Zn/3tvpXR/3CozKsnAANaU1mggIBoskKWYdGeIfnD1Sg8Q/Q/74nCfiAcDwJ7wVEFOVzTzKzQv+vMjWWR8LVPCqXJbmugf7uznL1kzkFnwPT77nU0zkeybJEOGelhUWjzXw3cBoYcVeZmupxUJmVOcNHPGARCXZu9dmqcpoB5yTg/nzBdTsjLDC/z1Y21AsVccS04hBRHpUdEavU+L4jc0aeM0CIkAJX14Ex1RdjuwP0GWVdlLaUAdB2ETDFBJq3WtEjQ7DUMJ1Ea15AI5jQa7IDUO9Bt9qObKrxL+7hIMRE/wEMODDoM9xA5Bo2Lxn0Y62W8E3wFWtZqTjP6oZggHgxlMOR+o928rz1vUNsh/7A2Dxk+EXpXdYhaKghKA25mI8O1WmD1QEJjHU5EQASSmOM
rBXS/7oEwu9NA8P7NYePY5/CABrhe3/78non7BvVAH7wKYqhYaeMHZuyI3P5C4pWufu/jOuqe8GRC4xibdJYMaPTYJ/nKZaf9Pgphvz+Qcf8xacKXLwsuX7zD8nTG+x/6Ql1qLxOkPYFrQT4vqNuK7X7Xnk+MOH/2HqfnJ5yfn3F6/15HFU7zAXkjUJ50YP7pgtNa9H3EiLTMSlyIEUxKEgoxIE/qSTWfZ0MivA8VHoLv23X53Tz8lvl3vfx78tfZ9SOVvFNMJ3Rc4a9/+a88fttB6o/8kT/ym2KJRISf+Zmfwc/8zM/8hj/zxRdf/LYHd7/uMVhXrJldg7dI3XUT1ugkRGKkEI0N5LPZ3r/ag4FvJhCMCAH44SSQAQWMxp8c+0/QmyCC2rvdFMbWG0T0EEqWlU5vemhKcwW8zSv2+vGA97tCuqokm4OqZawj2wRGNsw2bzMyajL4co9qoDRGMbXXOiXEqPGrRgDUwW01yxBG307gOYKkKxSUFf92PUA97AnMBgOlYNPuBGEvAx0CiPvnF0boOjsVSgTWCf36gvryCnRBmIBWOvrW9r6DBSgPPmNsAKPAeFN27NUte0XhGbTZmIyD06i8x2SFhpoCbM3QY3M4mlYBHasvf+XH0mxwZaCDrmqIqO8nAI96i/u7HwkHy3F2mw61nb4m+T03iIBC2Atk0UqKAPRSlKJuQZoEu0r5CNBaTWW4h5leGw5+bwNyVIZcNEYrd4fTvFL0/pm1K4RM7Z+OHw+wJI6Oa9T/bHtMP/9OUvGMf1T0495rYGL7ENEm+B36H2Qmu0fkidsxufAbbu9DvO9m1G4vNcno38GU6OenMy5fPGO5nHD+7BnTkjGfM6TPOjuYE1otWEqBuwovlwvm8wnz6YTpfBr2K/6I8wQhwvLuGXFZMNc2EsE4ZYUZgxl+OgzrVjDG4FN9y30tDpNIjMv4ZsV9/V/oZXtM48dYRFevse2+4f56w3pVZnArBdLFRBKidRl/68c3gt33Gz4OAUSxTocBWBmkACDaNI1kdO4DMYEgA2cWcsXrPUDp4pR9zY7nfBusrIKyv+/WWyito/aOtWmQEuFhWx8QzEzPDwIfgNxZNAB0U5BughwCUgzD2mMMHRPtG+uQFepAqEEi9m+HE3b/ZpR1IjLZHAJF0TlIMKQX7Qkwg+sKqROIOwKJUbzDmF3xA9sn3UOkfajZN/mBxj3OAekgbiDpoLJCtgn9fkO7XvUe9qgWEsXmiXj3pwI87h6qnmM14YfWYa04BLF/1801DkV77mOgsrPKXo/GtR907BD2w2zcj69ZtswgkqFUwsAhSJkawQGoo+NBAPGzd2g78uFn/XPSqDL0UHCV93F2m4ZadzuTruQYXycUjBBgkFwgIwrY4ScUlNVm9zHa/okeGDwRELUfZFuDQthJNmaHsx+Ufux54hiwV58O43q2zvYbe//1cCQ8/KzvDbY1ssfFHTs5Vitv94hXSWIsUUrqdBznrP+f7e9iQDzNyKcZy9MZly+eVFfx3Rl5SshLBuwcCFNSyaqmgUZNDxe1XJknhd+twvKzJeSMZMIF0RJQ9yALKY3eq19PPzd0H+5iBPAz4+EKvlmjb/90zLdkb1jI4cecBc2965zlapqQtxXbfUWrFQRCThkxBaTvhSAVySVr9PApAIQVOqm+ORARIxATMCMgBWDJNikfNPsLtvG8whAiPGybsVhtsYvoYvMm90H7TisoxtoqrqVi6x13m2cRCJZIyBSArHMhRyfSiJ374BRZ2OEfzOAxpmQVlUJYx0zUWUlePe36f/ss0Ii9/ktWdlAEQiKkOSMmAgVB0NF5MBf0qrbg7fqKGgPq9YY8TTprE7MJUW7oW1UvGQaCj0HSXkFBIjAl0JxBPIFzQo9BdeF6g/SKgGbVCKFjQjzdkc8VnQmtA9tW1agPyljUisyAVBGr6BhcG3oMaFtBaFHnf2rXIZfDIeZVrjCP4VQalRQAM4IL0TqdFojCsYrys83um+P9I08dAVPA2GexuqgSw9Y1sanmOxaBYVsSaf99V/FWO3oNVB1eXclYpp6A6fjqPqbgDw1SHd2sFZx2rcE4WCDdDzJCMHq1EpXg18YHiTwpMIinM6O0pnuRGU36GOiNBlWfc0YOAacY9XMC+0lIvhGg+8vGPxq7Krmncbs8VPS3YddV1Vw8WQECsVWFpnRv5A46Jqji9ZnRBhzGs0OeUkI8zQhTwvzupJp35wlxSgg5Ip8X5HnCcllwej7pn58WlaMySxEBkJdJ96ohGxQIMWXb63EIWw/UQ6CB0YLkGBk4IAl7tPbE7fCdHgP+/vhqgPqtHl+truwad7W2L/cNL19+wu3DKz786rdx//IF5XUFKiOmhDgpAsMhft3Tf+XxjQ5SOqW+4+cMMe8ZYGOzRiA2eRUGBYGA1TLDNq0YvrxnSzvc5g8RXyV62BJ7peX/btXKaBCqfUC1Q6f2PmCYRgEUzA3VJXcOmbp/G4O6BmGFoRax28Z/XVfN/+uZtmsA+oFLjvPTfhjj8FrHGRnyQBgi2G1EWMzITAd+Q9qAqFldvRfUtYKrqhkc9wQRgZL2BmlKkDlDujWOUwQH7XGIqM231AZZC+S+IklExwpBALNSppm9EiFTTjAY0/oKDt+RuYJqZRR21Xe/3G/u4+OfvfJUcz1h0Ul+s5gQwUHfDft7CPshvpMZCCQMYa+MgB0K20313BRRoG24keV7NSCmCyeCynsV5hUWxNUuxAXtVeUA8uY8koOwsFWl+4e3zzdwsD0z9890QAFgrz0ICKwN/tIaameU3lG4WUVlFjsxqKBsDJisaj2OTPh+GKQlMVjUkQoPyDicASGMuOYBvB2qKof4BarzCbGEwjeD7Ndo7C3azxccKhNK0ZyQE9KUkZaMmBOm04w8ZyUoTBlpcksegwQNMnalmOBLjPZq57gnD4MI8OHaEKF9Wj+T4PfEb+CxEnxzttDDInh8jKLpq+X/qFAff9DuvSV6XYf3y7qpuePrDdvrHfW+oW9F59YC2cHks3m/9eMbHaSmHDAlhb+ICFWAIh13AbZudhewXosAnTpmAVJS4cXk/P0dw7GF8TUP0oG2AEBi3AVg7YBgC1CeDZfesbWOresXoOskWnnTLNhkkxXzfyccNoQFRR1UVqgvhoMZ35u36BkgQ4eAG0wQ1H4yUBhuoNGDlFVp+2GKsRmTMejyfAJY4RmSAG6Cci0ArmhFadDcRQOUQXHTPAGSwZZhUtJ5NkwRmAIIHRSAeDqB1jt6nlBCAqOhtoaIiigrtviKuDGWlkCUQCHB9TNBwdQ3ug0/C6RCB0YFqGvVwykFk34hsGnj6WDyXkHbSM0eDLiPgD2SmK4JiwSFN32mjYQBUWKHZt0GvwQjbPj9YQ14PoQL7Kojqm2n6uAdVlWL6sDpeaQVRBH9uU3UnVYDlgU8WJ8OYsOmUA3BQKZTuS+aY79oH1PY1+EI0H4eEUayFAI9HHZOPOiWHLTWUFrHbSvYWsO9Vqy9qecTTCA1RmBhnHLCNAkQXSCW9iLK4WmrnCqrhbzuL13V7u4cA+
lzWrCqJpdWx77Qoi/oHVCFCXGdwMMHFxzCgl8zD1A06MQUYO85mmXKhDQnLNaTmma1go9pv17HysiMtMfzH5mf+4gMvXljJteB+KiK8jY4vX2M8+3r//m7fXxt6HJ0wJRq1usdtw9XvHzrO7h9eMXty49o1w1SGnLU/re0hh5ICVzfxeMbHaRO84TTMiuriwJaYx3Ik4rOuilc+GEg2OS6fodFAQA4TlnvfwfAyBeWhYV9PgQGD6nhnf/4ANh8nm1ktISvLtZx40fWfcyMDG+2w9RhCu+5vIVkAD1MlESCgyUCgKBsvYyonggWsHwgV8VgBb10G4UgxJiR84Ll8g6RMlqYEOMEkoh2r+jtjnBrWhUyo5c+slFhgRAwiZrwBVOqhgioJwTRGZ18OiGvZ+TlgrrdwSD0ViAhKyzWAWk6YDxUrg+BXBssrJYHTOhsXj05gXIEwAhZiR0hhtFL9MHklHRGTJjBocENID1gybitVl10rYJF9NqLq29YnwakgXzc1oNCh64/u1vi/zksHXu4hmMLOk4QDu+lMqMYUlDFXGvt7jNjEC7EpKaIVRkhjBxYgxnZ6zs8BsIeyPYSYxyg7gbgX7ZNxtCrq3F4QSLYWYuV+4D+ujA4RIiYsHKgXe1dDqN+sCoVuoc7M7ZWca8VW2tYaxv7cLYZrTlnvdYxoiPo78neu/N80IOWiKgHGdzo0uuOPYk43hetFgVc1ca+hStiydhqw7wW5Cmhl4Y8Z3CZwbUhzRnSDe4b1G8Cxcd97tfak2SyvazXYX8jPl7yQHh4eLOH5OHN+/f7r3/w6vFrfsh/ef/20JfHYS0BGP3cthWU24r7yxX3j69YP11Rrytkq2P2jgHUuIJbw9a/B5x55zljnidQSmAixKrZr3oF7U3gkbZ40XQ44/Tvv+4u7dWVrh+9qQGqAg1A2TSAyRoN4b3x3GF87RnRrh+4v5Fj6/f4VvQld/htkHJG1uxpNnDMcxiEDjWWW5nRWCBMSBHgQMisPYrktRsFgKEU7NrNzlyhuRSzVlLdDl/7amuDVIGEaoOoyhgLdmCHpBXUrhoRECd9xcARUlTJIs1qtBanBSEv1rwnMEUIRXRReJVZzMIBA+oUC9jEO3WgG6ssCCOYukUszWA5FZd1W40YNdONMYKdGeXQGQ57+ABrOAzmgZINqiK3liWf9dF/D5Iwjh0PUH5MykEVhRy+pfEeVNVJRuEl0MqpWU+qwntSDu3SrlxiCEKkfW7MIed97u9YO+n71s/8kKI9JnS0i8DuFHCDE8dnsfc7go8Gq+EMC0LvNBiWI1AeV/E4RL0XZ2SkqgHqVosenERoKSIb1BdjMoq87QPxgGSB82GHyyCu+CegY3R4uAq2t1jQq45KFBLE2tS9oFbkKamyypzVXZpV2Z6IwDlBIKaCfoD07CW+bjzhcZbzMWLR49U6XDx5+Jc9CbY7OzISf5HDDx3ewn4f5Kt/6a/j64rFvMnU3aDcN5Tbinpf0bcCan3f1wB6Cda73EXDf7PHNzpIvfviPS6XC8KUIRSQakNaK/C6IdzUDVZ6V1kXEE7JHECtYeqZ1aP+xGHH0H4njwlHCHEEB29wUiV0CkiiWc4MgxW6QnQ+8DeZhJNDd2NTv42ThL2P5AeYZ7+972/RHYPt/QgITRQKuraO19JQusJhOQacOCFq4WTXRRvZ3Dq4NLTrCtQEbAloDOoBUzoBEwE9oG4VvQrKdh/Dp7X1wexxkofEAERgqWfMMHg1a5CS3o0umxDzjGk543R5B2FB3Vb0usEPzmk6I8YJOU9DHLcbu4j9zoiAS4NIB/em8yIpASRIzRTVZ+0ZhKBVRUpAngKEE6ZZAwlzBxeMoVCWnRFnt0QDFOlhBSLEMdhDQ23ElcVFVCMQrPCfz9cdB0YDaY9mCsFIKqJECIP9Rryz36kWoBoU1ux2LzlA1TTwePgeawKn3ovDks7Ntqb7ES4cVaGxFsk8v4IzxHxAfQzQupmfSihJApasDsBNrI9EhM6MHCPmGJHtKz70YXYgWwOLmDV8V7uQ2nAvBS/bZhWtqIJJjKgiyImRVWAOYmKyvi88mHZosoYQkO368kgSZKehH3pUAAaDeG0VVQTbq0CCVsvzFJFTxOU0YZ4zzpcFp+czptOE82cX5NOM+XLG8rQoHHiehy7l1wWJ/2ePwxn25q8fs2D/u/87zy/WVxJwqeC1ot4K6m1Fva6otw39XoBqA/rs5B5GAwOFUL4XgtS0TFjOC8I8aZ+odiAXm7gmHdKsDUFUVn8OpF4vXl2LvMlgDnRzWLYsqvlG2AdH/RFC0J4XmxUEtEqIAJIIsmXeTmBgkYNSxJvGur/qsbQHRvnvtFvtC+19M7IoR3SY6YJi+I2dvMGqnh5dKeDta1rl2RRXJmZQ74N8wI3BTaxa0gFIVW7XTLge9eaiSiulUpC2ZFYRzcz8uimk23MaBEAUkdKEaTohUASnBKe55jQjhqz6gH6QyRG2gAVudfNtzaSUetdmLRG4ZLBbJGQ9FEhYRUthvamoLEpJUX0ncJAVGndjb2N7NnuUHBqVChjCAWJGhgBAwT2brPrzNWTVdQoEEUILGgDYG8y+JuAZ//HrUKGNy/GYSe+LxP7Sh1aH7NNe0dNBDvdIjjiODHhS5oe+YWD7ZyOYWSFhilGDU4zgFEeQmobquc77jXlB8uc+fBa/v943s9fxGbfOPHpYtXcgxKGPiCAPVdSoXD2wj6p533CDJIX9d/aNou+JWXuIW2+DWVmqWof0WjFPGb2qW/W8TGBmzKcCrl0VV1pXBl/WN+PMVP8co4J6KOgO6/0hCO3vb8x8+eeSwz8PgllwYZp97bx9vC3Djq/l69ySHekMLipIzaWCSzvY/nz1Y3ibAoTvDeLE+fmMy/snpPMCpIipC5a1YD6tOC93dSRdN6B3UOuIXfsgpiX+lftzvGhj+4bjxtHM1ZXHYbp5LpFCZohIoQ8vl9gZgcKYGyHsEKAfBA418CjjMeAiP4wYdsjyIxQD8A752WG0z2k1bL1pk5lUSdyptIhGSLaSRMxwsV5XcAxq020qCHUrKFtF3Rq2tQwXVw9Unft+mAdl8dGUQAG4v54wnWcdJLSGg9SO7b6hrQVsDbOUF5zO1kzlNprmwSjs0SSPNNjzqEwDTG2kVfRW0YpN8IeInBOIG1oAqE9AzgjzpPAjgMANAQ0xqL9UTAGgpBBU7WMjskFow/plXHvag4Of2RCgC5i0zuPW7fD2RGLfvMHYqZm1rxLsfm8kUA9KDUtOx1dyhxFEYK/3lUU8/qMHYAjGtNRZOGFRQeEjlnRYizQO8scApRXUgQii+kRQVqt9RgBEqjoRKIKzqZdDCSDdBn2TkRwuOSFHG0w/aGqOMOHuAbYLCLvKjEOAlTu4CZIIps5A6GpcOHaOmJXP496OoF0B/5jvHC/jfpf3qkr2sYF7VfZi7X2wKe85YE4R9znjfJowzRmXlyuW8wnn9xe0rWJ5OoECIZ3mfYQhCEQemXhf/zi8Q+spjrDubE2bIWSfGAeUzBMCQkxwJ4U90/2Nl9BDM
en/YOw8rk3HUm4r2lrQbiv6VkdhoAlg0JGIcGhqeOCX74Eg5aoPSrgJOOeABEISYGZByxENAq4VrDsU4F2aSBewLU2P8OxL1ajfTk4wTE6zsIMGmZg6gV14iqZkYYc/hHVniTwcMN6X8gMBcAsBOxxS1Mwy2nS/iAZb60+RBboAyxpN8JQBU9Tuwz04xoDJSCaXywkns8qONsDJvSsTsjVIVww9AGBug7XTakOraqTXvcIyiI+ljyoOMYA4oN43hEC4f3xBSgFcqpb/AKQL6lUXdN0auIvOO+UJSFnZcn4XjHanjXZ1iV1rHfqMapoHQDpIunoKMYGpoa9XEFdUNMimNtw8TxrEiFC2ilKbUmRNMidl9VSVlHQYuqk6tlcNb9VWxmoR2ZNHomHHIV3JOjAiifcfnPEVQkCMgkkEge2+AqiB0KHsJzfxFCgiEBmIBgsSH04T2TUrlbCzM0MdWhaTXvI+6Tg4yI0Kd8PCB8KED4/b8+gewMjaMVAJTbUioFp/UZloCT5fKGNOakkRyasp67nqTJpXaDuNQcdI1Najs/5uIABN9vEMctV2JRlB//lhsBkwYWbRGbRg10xtRGBQbB8VyYDbw17t6HXVyk+gbEthBoHRO2ELzfZLw5QiuDSUk/VoWsP6fIYEwvx0AotglkXNF93+d0SqxypuXGunbw/llcfg1Io65rZa4aFB7eEz8rwg5gx1m496Ipm6+7HyeRu/yJNQVnZq2za024Z633D/8lW/f7iivdwga1UBhaBznUQ7WYl8gRMhyP8Hzrz/bz9c9gddQEmMUh6ApMyuyowaA3oPasaGQwlvum9ilCaf8RgKyF4ak88V0YADtDFr0Ie9lyGpYni2ULCpfChlmRW/FsPu1eTb1DCwHwhHVp8/n65P3Uj6z2EEkqGqzt6DIFP01k8b7KCapoxlnrAsM+Ypaba7iuod2mwRE4EtKwZkKEK3qlYQrTWV0GG3lvD+iilaeE0o5pq7VZTbim3Kgz1I0J5Ns3kqnXmyihUZXleO7I09QOmGbKxW5i4GGyL5FBPsRo4Kg1tRfTYSoG0qt9Q9SAVUD77FNnNw9lWERKXVNxvkHT5Dbx7+uQfSYuuLWACfg3MolIBdvdYDgIrxpiggN9ETAXWg+Foc1cNRWFgQeIe/CAfCBDAgtB1K88tjQRN7cNoVNZwcsa9pPaBtKzhlH9CRBDpoBcKzek+9LOmyIe4gcRAtfE3u0l76Ow/DpgS9fvY+g8GIOSgkOkeFJrvEEUg1QJkbwgECPypyEDAkyFwIYPgbiSIWA1Gx6pmgSQGCz3Nhn1UkMvah7YXOaoLIjMAMTjqorHupI+RoIrELQKTDwDkZuSiOCDHWymGNjUSpd0gzebDaRoLtKjNl29BrRSmbKuwQYT6fdwdtu147VeZNxYTHP0P2da2ktA4uFfW+orzesX56Qb1vKC83tPsGqVW1SI2cBCgqE4z5pCr0Ko303Ty+0UHq9TsvCBVoa0WeJ+Rl1oZ278BWQKWASzGsVGEqiOy9JCi2rUGAxxfYqqEAhQRscE4TfEFnY5eFOCqhbo12n3W5M7AKUBnYEHRiXizwiUEgIDS9a2opnuIwmhsCkLKrB3dh0z1LY1aDuJoUU0NlJU10iYAQJlIPJsoJz58943ye8f75glMAAose0BZ46kEBmsXtIzRI7TJEexD3rG1UUMAuvyMCKRUdwB0EKR3rfMP99GoBNozDd+8TRFDaDznfJL75erf5oNaxlgoWtmpAD6kczRG1p3HoKJGCUe8bKjQIhKuSbASEtVRtxrcGihlpOWGaovrspBN6F7TS0bZoxJKK0TC2937Mdo/Dr3tiof3AEcVor6Akml5j0Go9iiCGjgigUUfyA5N5PJ9YIBILgs2u/9DFgwaxOajo6xRsNo4skFvy4hbvw8DSIGzy+DCSpYEi72WXHzQeCECHa3FAC2A2HAGIac/Pfc4vWD/K80F/DbIlNYKgHWpTIEiOKs8EhdxOvdt7DzjPE6aUrMrSJ0wQdDEkwoJxJrP4gc6iCTMKOzKg+8wzf++ZJejahlVpcyRcpoQQCE0YaxHULihdKfNiUFvras/Ta9NzqHdMLzeICNbriloU2p4ui66BlFQT0K839nXmwamvG9pdDUnb7W5alhq0emfcb1eUsuF+u+sVjxGXz7/A8vyM5+8T0DutShXKxeEGHx+W6fnyNrSmFx3MvX/5AdcvP+D+nU/4+MvfQr0XlFsFug67B2ZEEFLKkGBn6kh69H7G74WeVL1vKCEjICh1upkcUGeUdUUrqh/VrfSGQTAgc7MU+eoNosfM7mHIdWQzgDdcHd5x3TWdsNfFWlj9d5rXBwb7DbsQW4UZuhHEdutOT7VN4+6q0k1lQfTgCwLpRSf8WwULQWVDJwSKyEEllELOOE/6teSELIIBJsn+GjqQ2YeCsQ/mgo9w1wFekP0wcsWAcKzyTDmibUWDTWdTzQ7jgArhIJs0boXTvDHUMgQYHkYavO1EDQSEqEKnTIiY1TMLokEfAEkbbD1lRqq2Y6tVg3TXwECSEUmQggrt9m5MThE1+nNmHGCBww/n3/jhwUUrvzCwea+kRmCLmn2LCGLUcCOsVaJY9SkGeQmp9TqL0fJ5J1UE7Ytr0nNwTR5BxNbxEcbZe1EeSBwKfAzCdqOth8K79NMhio37A0MKhlUB9oQEe5U20AdHDx6unSWLfpstgcsJOElCYkGKwfYMYTGWXw40PgGC6gmyhPG5s1WjLjfFwuisLERXowdgQUqQQgCCVe5WEahIdABHQUnRfkep9gJFSrqoAldnhap76+oSAML6ckNIEWnOWJ40QE3ztJ87IYyK1xNDrhVcK9r1jvp6Rb3dUV9ftTfkCuO943a7opSC+/2mc3wxIqYJFCKWpydM/YTI2fYxHTzu3izcY/XGSqRqa0G937G+vGD9+BH373zA/cMHtLWirR1EGYQI4bAjUqMytj3rVfPXBsevPr7RQWp9vSP3ACmMOiXkuRhNtGPbNrTasN037S2wyiPtGn2j0LXrZ6UoxAkwOwRnP6f3zA5qkM6v2KHhqgFr69g649Y6NmMBdX9u5zc4rGCHfCWtHWYT7QTReJ3e+zhMIV3hHg6IwVSd2x3MDb0VNCF0REgShDhhSRPSlJCWCe9PM5Z5wiUnZe5ZsPEA1VtTxQyzHG8OpXqfwzN4z+pg8ZYOsJAFqUhBWYgMcGmoLOixoqWqzfeUkKeMmCKmWVUC1GZjoB0jIHqQYoNPceiLaKoegZQQJkIgAbHSngGTFZIOVFX6loaRLTcGSik6piCscIt0pCDIiTAtGSwBrelBWVO1voUa443sFjTmqI6bbsw8scpfkS4W7SHaIRoiARTNPbgrKcYOzgCAOIw+SSdLjEIA2YxcQEATQaLHhCEQhp3LGHM4VL27tt9hrorI7vW+N8iDsMNhwGE6PQz1bwmmuKE/MAKXfRRAaDz/uD6EPUCFABzGOjyYKcNzz8BjDMgAIuvrulqHP2GOSWFEI3eIAEkIbM8DC3hHqK9bj25t3eSbNIEB9GNOMSJbXzgF
Qbb3kkLAKe69L79n94NuZrUYX7siJ50Y5V7AreMWow1CE+ZlBrrod7Ee9+GcU4X6jna/o28F24dP2D58Qnl5xfbhO4NZ103V43a7odSCdb1DUgRNEyhmCAind+8wl6rW83YNvpJnDZTAvovOT/ZaUa43bK+vuH7r23j9tW/h+uvfxuuvfYm+dXAnxHRCSjMoqYVPIBUXOAD4v+3HNzpISevgWtFDAFgrqcaM0hvua0FtDVupQ0UgmYbWDEK2TZ1C3PXsTBrCmX8xuO00rJJxdWfbtIaZC5EqOzBjbQ1bV8HQKppRHXXzxq0yrb8mKoYLBhpb3iwOc6kXi2f7JGwBkdBJ5w2o3UwrrQIhgUiZVDaAo7YeKWAJhAlqpcCtK3ut90FzdhjL7UcIGCoFR9TBWXWdD/BJ0AisgreHBrtFHOlsslGCEBlRMIzcQNAqyFljggFJjTyeVCU65TTeA1tFo4oRqp0WIiG5mZEx58AdUgu46vCw+mkJamOEaUZvyiZMU8Z8OeF8OalZ3WUGS1BoNxBiiQCrNhlM7HRcL3uPvrEPd1nXSWddByzgEOyzaxWxi9RiBCnvX0nvCAJ04vGaiWDwFiFJGHp2rlLh3RcfPtcqgQwKPBwVtFctIwmzFGGvqOxfRyCA9U6S3hOwjisEgjTtsgYQKMSRNbsSBZsY81BJIejcVQzIOWvVRbqn3WYGhmgwyFQuVCEimESZwMgYlrEMw0W7H0KeVO6fCjDVCVE0o3FHZUsqO2uQgj+fBppkVWyOqnSeRAksUzR2LxRiXg3ubl2RCH1/hI6ADkIXQjR2Zb1t+jlAmJYZvXTkacJ80c+fsmLfvXX0UtFKxfbpFe2+Yf3yA8qHjygvLygfvgOuBa2UMRKylYrGDbVX0Dyp3kiXva96TDzYb7uBtH3vz405KBvnaNuG67c+YHv5hNdf/RZu3/oW1i+/g/7yit4EgozACSwRhASBDVNbX5m8ovKl91ugEP74Rgcp0mEHO3wUlaqsPYZ1LSitY2tq5EZQFYYkATHobEVgK2+c3UIuPGvXMuz+RMI8/KV8cPUIQ7WuGHmz5n5jRgeNmSTH7keybfNXIrApfKALIyoPZBAaWtfKphvNlfXc1YweHdSbkh24a1ZOGlxdk2+K2qDOpIrvpF1enXGw4dK3Oc4OydjR5SiRvV/PYPXn9OhTwcwwSB2eiftGGNQKw6R6Z4TIo1Wzt7b9YJGdUisYMzoxRaSW9POz7NBoTKAcEHIYltXRaPRSszauDb7sLKDaEVIEtwqyIDWdFkzLjDxl5ClBoEGqNVWuqGsyCrfCP6NCAN5cQ7uCXnkDpkqCwdRz6M+hD68OQxRE1lmpYIQbh4JJRnvG5uVEGWqEMVjdhcZriphCut0rV9kPgK497JDeA9TnJbLdMIcsxyMY5UcSQvbBcuskWlXsnlPel/OAxRakKGi/NJgXUvB1JgKG2r/7RfX35qxIFlOJx9Eo1IKTBTu/DQOS9/VlVaQjGcye2HYUFmxGICIoCUlYfzbbAHRjURJFNPUI29ed9Z3UpKorpTqsix2yBXlhotTttaCmiPJ6RwgB5brCPaAchm3GPq3rhvunG9ptxfrxivLpivpyQ325gWvRXpFo37yxKnuwMGK2tQIytOjQ8ZV916m7sfefxVQkNGD2pu+1rSu2lyvWT1esn15RXvX9cClaLlGEREOtup+X+5m5b3D/9j0QpJIfvLYRSmdsreNaK17XogvPMjwirZ5i0FJ8MbZRJDsc7ID33gqRqkL4XmU7TRlqF9ANFmBotXSspAprwOoI5r8Txr5nCwx22mtTFUqIqLYZmTCICq25BuE++wFRenhHA3rTvwsROc+gPINOZ4Q8Iy4npGVGyhkTaTMTpYK3gl6bOt1agIyBECSMAcgoCi0pHm2itRZQu6jygQFaxoDy05MG3OOPkVWy9jGaaIDvwkAKiD0ONQqIZdN2yh6xa2WFmZ+RBABdA05jtWSIhBCUahtyQjIZJpKvZpDcGdIKhDsiGmIMiCkjLgtCmhBm1Q7UA0aQckAvSgnvFuC9JzVgsTeQFgCDdATSASEdIQAzxBhvbgAJC8IItnmDvg4g4O5Q6r4+/R4xlAnYRYeqWZSaXo8BHnqPE2k/JgXC/OYeAXtyArsPOP5ZdO2SIRKUIvKs15l7B1dXnHDBYk1e2NWx3Y9MlLhEKWA5L+oau6g/mbSO8umqowm9jXgZCAr1OrzKMrL//b1bGHCoMnh9qD/rIznHg1GJTozaWYWgBSgCS1Z3r6/OosPhAHLq6unGBJ08JMQAhBwxx4AIoLSOewijt0WmwOJQPpEeGrw1FLnjmj6i3QtSCKj3J3BpyKcZAqDcC9bXG9aXO27f/oC6rigfP4FvN/T7CrlvkF41CbMrQaSEjpwj8umCfDnh/PQOp/MF87yoMy+ZBBgrI7HamVDvm5KEah/sXi4WpO4rXn792ygvL7j9+ifU1xX93kGcoAlqBiRCWB2gBd5TfVxKwZO374VKyisdYK9u+rGa6apb57ABGTuvWa8o8QHJ8J8B7YHq0I8S7AvWf7+JDDWJym4joGoPndmw2DBcUpnpgR2nTDjWrJawf4m+JmjHpzWQmmur2Y8O3ThSle80LwjzCfF8AuUZcV4Ue86K1QMGvVklBRHLzmn0fiJZ9RfCLh5KLgtlWf/XLC4BBhnIs1fx62XDt90OcuKOTkDiDiFodeRBCrDgafd4yPNosOtdKfGtd7TWEXpAawrD5N4hNs+SiIbyOcVkBo42kOuJJM+mPNHtgIM17xncqkkDxb1pn6I5nQawXx87PG0R4giePf6/9YJIQP3IgPOrFS3GaxVMAELSRCrEneIPe0bNUvfrdLTwaJYksTjMYoQBQOWqGMhBWZjHx16A0MP/jYpQnNUJhXZTGO9R2t7HGsoRBLBBX1T3IBWTygEtTyfkKWE+TYANhnIpgAjaijefWQ5B1fqVo0rUK+guz0SOZnpFa9Ug759XDs+uFiI253ZM+C1QEfYErfWOzkqUoLH/CDkoBLmkOMgqzYgwydCMYdlBB55QFw3KIWB9uQ2af29qwFnuBevLHfeXG+6vd/R1Q70V8NYhzXqBpDTRQVIIETGqjFU+nTGd7et0Uuaqux6IQo+9NmzXFW0ruH+6odeKXlStRzpD6g73bR9vqLdN3Q96gCDp/gJB4gSEOCpGvfT7HjgUcF9JkH6zxzc+SPmQHYtWOD4BXjtr4LDTMgSHE3TmIRmsdgzzY/F4gCJfSTReo4pVTV2rCfbFK7LDfE4Z1zQegXfYz831wKyHBLFHSN1IRnvXpMt6O71DRKeIosvHHqCTGCPyfEI+X5BOZ6SnZ4Q8I0yLCr2aTbuwzRs1/cIgRpg0SwAQbZNb1qoFZDC1DDu+7P3hcODjcCkFGMKemqn2cW38M6beEGNEadXUyOOBiTbAI91sdmNYBL0bKaZ1FBMUDjGgCyNPGR06yD0JEFMGzD7bLb+9HxSSUtY18zYKbynahO4KDSKoWyaZ3uIIUinqICRD00I+ZDt++AmGlYv/rUN9QAOJCqwGMeI
EsCuBOAOy8ThcBVACxv4KCHxIBqwqKF17nMXYjO5WHWB6f2DzcNpbA4diFY9Hx8g6RsbthpoIhDxPNlcWHn7Wew8CGFTNiKWNyiJmTUou788jSHGp4K2ibZsSWa7hzXX1A88SPuyqCiN58nBsM13wweOh0Ms7H2A8kz+f90D3K+AJo6MngXQtKwPW2Ws6nEzQXhUhoQbWqsiCMkH1DNVNe4dcCQBY0O4F0hm3nCyBFLRNh+rLVnH7dNUg9eGKXgr6XZ2yqQuIoiWp2FGHkIAUQcuM+fkdluczTs/vMF8umJZFjUoDDdmzct9w/3jFdlvx8u0PxuDbrG9tQ8NVRzDK9Yq+beibQDiCwqLPhQAKyYShfbbzzVqy/z7Mw30Xj290kHJG0I4762Ms2hFjjhfF0iyHO2jUT+OUPV46ReUEhbWpunXBxjrJ3mwPdTk4htrCBGCZqiipA7rRe++WhbG6AxPppg1AMrZQCgE5x8FEZIP+AldV9+4CQYBQRAwJKWfMT8+Y371HPj8hP78HpQzK0/hc3PqwseDjYSNepZGx5/YAweRzZB0BCrHlqBl4MOIEiIzSq01zbZbuHkn31rDWah5Au0NxMnuF85qRrHcWHNI6VLTRk4VAgxq/lYpusk9k9ONSC1LOKLVh3grmZUZj1l7TPCHnpK7GU0IkvbYh62YFVxUwrQXrpxf0TRmhIWXE6QSkBQL14IoxIucMLqpD2M1yfq+c9s2pn1XDylAMZ3NKYa2MdXBXPYwpBgRKI0FK86QBFsZq9F6iVdFeSTTWtVgZY+zBDRHZXt/rvSDaTuvQ9zHe7+MGAmxPHfuSbFl1K01tzQFMy4w0Z6Qp75n8Ad3ogz2690A1SEWc352QpoR5yWhrQV8Ltttdq6OXK6T3oaohXu2wV+U+p6e1Dgmp/YWYLUl0+DToBzW4DsSjAh6zaQQEU+oepdMh/9JAxdoLjgplcdyZrz5qJIZq5BgwsVhfCGbMacPIFqBGm0IE6AzeKrZPV5UnKwq7kfVuy3VFu23oposn3QfWA0KYEKK6DKhpYoSkBMoJ4TRj+UyD1PJ8wXRWZQsKQckpLGibmpfePl1x+3TFh1/+EuW+ot7uQG0gFl0nXZOxXor1s9UeiRLGtZRDh/BYtY5l5TN5h6/v5vGNDlL7PJGngTTK5UDYJ+29Ohrfj19vssj9hNHsdcCIaplQRdRM0Lx8GLvitFcPwN6cHoC4ZaPc98FYWCWoE/A0JvFVJd0spCN0XkYI1K2hLLoZBcHcQSekeUZeFv06LaCYgZSUfGBZMCB7U5T3N6vHGI3rpNp2ilkT8YAUA2xq3DanX7hIO+zqa7OyzoqtrePeGkprWFsdh3UOESlqnyxHrSAiORRiwJFdD08o2GDUUit6Z+1bIIxMvne2+RJ9H5SzNrwHZKUwFQWoACyCHcIKnbXaUNc76n1V/bE8IXUgLtGY1TYLFvRAEBtQFFt7Hqgs9huU5Mtpx5KELWgEQuh2WHWrUq2SRaChkxfYoDnqg1XoxAKHWL2i75ZUeYDy9chv1iof3qMvew9Ihx1m9wE7XMas84hdo6CzLqfTPFidvh0ZAu6yz9zpBVJ4N0XMTwtSjphmG4oNhLRMiFvRIEhW7fhsHrzPtv9ZL4MnB1YfGTqAQKa2r0QpV+IWb1AZtBpsINx1BoeV1rgOcoAM997muNFjv9teEA1I3rNm7OdNtKAW7PfJmZlssN8YhdFrywy0rSj81vogPHli4b0uV/6nGFWjMiekZUFaFuRlVnJKMljO+qSerLo6TL0XbLcV5XrHdr2BmgYpHWS26zeqW4d0aa+ksSMHoMcq+BiP9pLheyBIIemMDBntNJlsygSFfAJ1EPVRwudAiNEm8W3+IZJXUY5VkApmEqE3GSSJrXaUxigMFIMKnbXDMCafH6zAwzxM9wqrd4gd1NqnCmMmS52CdcYjp4A5J23mx72S4i7gDjQEIGQEJORlwbQsyO/eIz8/YzpfkC9nnR8KqgwvTQ3JdmsFHoema3J5M15CUEaOfTYKDDTSRcc6eS8iY9AQ9t59EfpQ80trWFvHy7bhWgq2XrHVMl53iRFTCAhzxxzt0LdKqlsmT6QHl0NcnknXvg8cwwJoFUZv2isorSGtBVvrmOYJ563hdJowTVlnpnKElAhuE2KOEOmo9xXb9Yr7h4+o1yvaVpCmBdO5YnoOiNMCimr7kXJCnzqIBNLikNLxQ38knsBDP8415CKUlZaCQs8x6OcIDLBZVQTEg524wpXcuumy2fiAWPP7EKi08n9bGDmqoBmZwzEesBz2cz7CyPlgkKVX1gIQM3qp6DGgm5ldALCcJkSzUkfce1JDxcTHNiAKe8WIvGT180oBbUqoU8by/ozeG+KS0UqBFE3yZFRTluAdPqCFqBE3/O+IAsKUFWZlrWpAHd4PBKn/VOoRi0BZr82MTMdZa70n2JwlYE7M/kU4cgwdBY/WpwKMMDHgLuvddTcIJYPz1W6mdq1W2lpG5dFbV8UKc5Ueg+miCSxCQENEjBlxyoinBXHJmJ/PWJ7PmC4LYkgghlZjXYCgSV1fi6qYb1Wrqrvq8dXbCliQspFqk4fTpDFQHIafQo+zpBAxYR3ZRzQA68nua+u7fXyjg5RbN4tlzzknoGrzvItuBpR94bqH0xRUqTlZD4QsI/LFDqbRT9irKB5Ov7pkfcrfIR1YBrRXA17JdRY0y/C6iOnjySiyWLrNEhxyDIMFnCjh0IZXMErzJcR5QZhmIGVwSOi0B94BV+CY/Mnhbx8fdDyhnDTCUJUEO8C+jjSxbz0NJE1McaN3rK1j7Q1b69jMd2r/aUHjhEyiahx+EbFnx/DsFxhB9jjL5b/h0IlY1t5N5DOEOIRiJRAYHWhArYD0GZyVYddLGVqQvTN67QAaQqqItQGBh1qEMgEDIBEtBpWCGYfyoUcpWlE6/AlooErkFGolQ4MCAqtyOnkS4X1RqMVKlASXCaIeQE0reYWqgsIyXZ8nYp9ncwfXYK/p2n9Drw8OZx3uK9P4rH7oegJBllUrFb8PJWxxpugRRo37UOqu/i5WzRKSyQpRCAiZ1TEgJ836s1YFIQZII0vSbR2+WYLHA28w1uSAFhABkcAxaCUue6+vW08gxohMAUxsOnxaNVGgUU0kOzMGcmD7iTubzB6Z87d9JgEQZNf+9HRLbHTCqfl7qTpU95v/DNEQdO612RptgxofDPpFIHCIalM0M3w7CYta8KzFnqMrMceGoasFKHTvTUcEI15IF6PiYUD7GAoxce+BYUcRQKKi0BbpJfj9srsk+2X4ujPo6x7f7CCVEmLOmpKGsGPLOUIgKKUedNxgopaEOagOWA40BlaFZTCCnJGiyZUFqa6T6XtwOkA6shNbfa4qUdDBSwAcBIUVZmhDI1BTbWHF+pmMVi0YAUYPCYMwwGZRYQd0iggxWZBawGlCD9q/iGY5PoYW33x9BdTx9ePzYuPvNUsLgB3EGjDHJ5cdbgK8gtDqszBj7V2/LEBtlj2q2oG+bhdWTUN/TXuiY2Y23q1RyYc+4OF3RqAyCANVN2
NnSZ/Vci6I3ArlROOSERm7q8RsA0v0pEzQGXj3V0yD7CSR9+Ye91gCWOaIN9c8+4IwM/xPnxfVugJf6pqrZuqceYkA6hBs4bRBXLegOIxnRqINig3sT8cAIBaLd9XGMj+glSj+9FY2RPjI23DJMHGpCOPYuJ71ADymwkqWyKC0SE8nBDrw35uqEmBjV6i924z0RtbwJo3TJAwGDD1gAIeEmgnAzuXxcLhFxjz777XEtvkBf8uujd591nS/e3QxY14tsIun1vtgbZNsi228/BCPbpCfX6ZSCLdC4Za8ooyeY/7cMgGGW5eJE+dLeAowEKCRs7wwEjHLMkxayr6OHJeSnNaFmWFb018ftDhDisnw7DfkAGEV3sxWG9zGxjr8k00CY1fR4bhTFZrQ6Vz6Y0kdeCvOZJqAioaoIyzzaDAp7VmaPq5oRqhVxu6E8X7O+9h/29V9hfvY9+eQ3Zqjkxse1qgyJNFaG6NpfYZTCYJDGSukKDp5R2XgFuHdw7lmaMptiUUrsxnPyuPoZiNEF2Kz6r9HHNBIpdFJdaca0Vv325YusdN39fBvBYFqwpgQtgAuUJChM/BUc2EDAPRu1vBDZhxO42r/2eVSHsdPVomB6LAONaHB3TdFBHWMUjdVjU3rW7rBaB2equBJd/UqOjh2pCSaYusuaMcJ4xQXePexdsPi25S0cl1/przbIol7CBZ1RvGqno/3r+SzvwkUUShpL5XXaqXguMyFoPNPbD3U63Gc/QK9y7jcDZAaP+24UHkYJqQ+qC3Dr2LljLDlXgIeprZFnH+rDagM7EgCjKaQUUyOWKLWUwbQYx+9BPQ/aOga4fr0PtaSnISzE40SMMjb67pFYXiuudGG1dUB26q1vFftl8nHwdW/Ju2cydakFZ84nXe8VFGrhk3F4voEygzFgeTlgeTui7Iw1dwMWOlShbRs80gql487ta04DznuMKbyyFGaR7z6G2jn65QG472usn9Kcr+rah191OprfO3G5fIoHZX/iFX8A/+Af/AL/8y7+Mz372s/jZn/1ZfOd3fuf4/dshJeDHfuzH8IM/+IMAgN/1u34X/tf/+l93v//0pz+Nv/k3/+YXdSwpYAE2o5gxI9Xi0z3TYUDepGLOyLdHpBmRafT1WNh7t2BmBDlvx8s6/ntwZAM+OiRCM/OyOk3mDCYBIEPjb2EXtzzQxcctaOdpwmV3mn18qEE9/2PVecwid9kTubq37hV621CfnrC/eo3r5z6H26tXuL3/PurlCmnWV8TIACeIZxebBPuso4m6JJSNEk+SfCglQ31cfBYF7RWUGeW6W09PMcMgTppomxMeXERVPVIL+C6E3oIU0JrNtLr53ZxU9dNma0NUDwofpi9n/5/ai8mhvEECCAOMuSYOJ9Wec9o6xFh5pH6dDqmZhwKIjPiYtWEwRxFeCkTGYrPMWcGkYBYk7mCvq3U1SaYdiq0Z4aD2MtDIpiZM27rr0wFDabwrPDMDKhEoAY37QSfPQyRXC499MqOsCMbsHEZXXOwpqECJB+swEUDKiNEl0Tg9No87LvGx9erH3r3WXBWoCkhKzmidJqw7pLb76PJarWdPpIOg4ETo1bJqDqZpzsCi6I9nyzwogcGDPRfXZpAlFA7tqzXQ+xw6qDMw94ZIeDKs5yqNkTmTHZqXxRzbUpxEFIzBw/mEE258TUxpM6tRiQrqDWDPUsj1OtVMCfZlRT2fkFPCcipAF2sszs74zXZ9BxVUj0ooh3U6wvpYwf54gG7hCEffdvStYv/ca/TLFe1z76E/PUFvG3rd7POcbXn7Uo3qeHp6wtd//dfjL/2lv4Tv+q7veuP3n/3sZ+/+/c//+T/H93zP9+C7v/u7757/O3/n7+Av/+W/PP798uXLL/ZQrDck2DNkRIpIwwPqm9CBjhrBXZp7+JeodYzbszSMluizC3Tnpdyw4Hj5DrdDwB0EjXBQFmFaPw8rgyCuEmDOdxaP7z3N6NZnAiUaIrGha3YnBnsPGNz/OIyNG/wugMvs676jXa7YL0/YXr/G9mT3vu3oQiBaBhGExKLmeueoxBp8JaErkN2gKlvvUwYbVb11pL1j2ZspafeOBHI4zzOnKCw7rj0KsfEdgv3lUGLrNlupik1FrkNLcWru7b1jScYkjOnOMltMnIE0G7fjnMeauatLxlk+4PLk+e/IlnSun2EIdLKe7hOpcJAH4+hOlMlmJSUWK7mqoqmtrS4w7TcmawoVW/82fNIyka4Y6vfqjgrj7228SufuGZw6HDr3xlCu8O/7PAmE2meomu6dnQNBJpeSivqewqJ5oXnuCP7+iug1CzZiOCqrhhrJJSZDx97qnh3amBFBa9aTR2RCwMuSgS7ITtQpxUe9ZBv2ad6boF2td4ynIguPuEJdmUQGa45C5cUp5eZkYTVKojEhGTTJSClG6YxWkbAh0x6NPrG7hNwyUHF/1neFdJu6wNmhReZRY2u3Ha1ktK2O68WaQOJ2M1jHHOvz3tYcl6cenh22LtAhz6La3tCuG7b3L+hPT6i//R709Wvo7QatuyUIpYByQas7Pszti3ZS3/7t345v//Zv/8Dff+ITn7j798/93M/hW7/1W/G7f/fvvnv+5cuXb7z2g27btmHbptd99erV+PloxAej7G11KDwPgM25jFrU0YCE8WaLSmJ88/P3eVucYc4QeGbDxr+nAhkBo3BvpelowjRYJzuDJxpn408sOg/F8+Rkg1RsLEf0Ck04yc+RHmxJFDd9M6tZN6A1yOWK+voJ26vXePo/v4Xrq1d477f+D7anJ+yXiytLZ5TTCZoLkFZII7Rm8NyuimsXbJ695NSx9G69Uc2EQjUlLCAUAEvtYK7YbwwWRU7e+xO1J4dqtEYm5ccb9arjOvDrnT2TPudstRVOXn+xXiewGWcZBnsSQHTwIWw9mJjqmw6f6U1HNSKSEQDEBVfPwuz1nSaEpAfVDQDDKdAxJfFsBzDoN7NiSfZujQFuh3WLDlVyxqhr0TlpIsZ8qK+9CLC6K3XXbqoUrRsEJ8SGNMDIC82NrTlhtSzvcO7VmZBbb2gKXLsMqaTTarD8y9OKhYy4EpN2tfv+UwU6QOTTnxG0exOErQKTCWIyTb6ckEuBdIctNxMKrrWikslvFSjQG7g3sArqaYG2jmU1Ka2UTXqrEIOW4jqElsm33chBc5Or96z1Ae9zsswAnqFL726H2OeacSwyX6jBdKUBJd8FQRSfdwyKnq0FwEg+YbMO65ABZy129FvF/voGVmdMlgReCrKrcWRvCo7+S2OiHgKxsGzxscfjofhcRW8WUO5PG66fe43bq9e4/H/+v+ivX0N/6/+Ar0/gfQP3blFgWYBS0GvDh7l9SWtSv/mbv4mf//mfx0/+5E++8bu///f/Pn70R38UX/M1X4M/82f+DD71qU9Z6v2W26c//Wn8yI/8yBvPhy14du7cKOvd6+zx2XOx8J65r/Eaj+5w+IznzooOP1tmFNExzQU+3g8zs4qs7/Azk061aGI3SfPLxULmYMS50GwqBwZfQHwRwcfRaQSrQUAwWqy6bJDulkH
1yw316Yr99QXb5Qm3ywW32w217qi9IadsNaalgPIC5AWZrdk2mEwg4/5BAfWheQK4wCSN2s/xvGi34+kuMGcD92Ri++5RohcpotPnjiIxo6SEVRXnUpCTIDX2xlxr+M7RUMz3GdIIBmC+JWYzqRt28rThuUBMnOMjEyuuNd0tAb1bOENA1XDQu0w/KHUG3ehQrQg9vpIsy2PxI1IzKiZJZWxBiov+9uU9sqk4JvEsSVwkFWRkBPVMLOA+iMOOIye0c2KHatl0FcVTa0MRvJNiLYq1F8smRi/XwRjrJB4RglRzuNYR6Q8o1jKG4dAOQsHWB0VoraJVQt0Z+7aBoNhKcTkqRS6myRcNtwxTRvFB9fOq8cz4uoSCifeRpfTMoZjdCOdt5ygWlo5AawRdqmNScLQT3K0tPf6g9086tjhIFYKxVwIyr2mfU7tLRa4W2C5QpG7wY8qe2R76KI9rY9Zp44vFHr7Povbbhu1yw+31FfJ0BS43lG0HagVpt15DSmbf/l+goP/kT/4kXr58+QYs+Nf+2l/DN3zDN+BjH/sYfvEXfxE/9EM/hM9+9rP4R//oH731fX7oh34IP/ADPzD+/erVK3z1V3/1+HfMf1HxZjWeDsT6TOZ7HS+xTtwC/mdvRgzPbgFvvBFEjxfQOKY7H/U2e+HO6B5Jsn+oG/SuGBItIYmUlmzNumcnTKxl1qTSkSxx+GzoGH8eo8/bbt3vUhtw2yB7RX3vFW6v3sfltz+H93/rt3F5/T4uT++jVVNs4LICS8HyeEZaHsB5hdw6NFUstSLXCmpmvrpj59o7kioymXRL1A7Z60FshQczgGGoD7UndVXVkPIZ49qfOSkmU5SHZ1JLybNvzg0YYDBxYUZJefS9+EWbkemRLgsFOJp+5zo4Op+Id+zRdOBmZhYL5t5ddTLnQzKL2JToTpFC1JiN7Bp9xRllBEVWgiihFYKIOaqFo5/nwA4ljAX7ZjmJrRiPQ6+hq3wwzectG5MR6CWYDJnS7AHrAKp2PNWOW+94b98HoeNBFpz7gqUUEBdbAw7VE/OkLo+9M4lMg9R0OINjtlpIggHDQbXe/ZoBrRF2VuysuDEgtYLUBnn224plXWzw42F+VmabgVRygpJ/RgRgCGduoq+RTZgqxzTkMfMsRGttH3jdpnZXQWlo0WQ+GnoP5sFXyvGcPG+Tgdj31C5QjmAORqHfrXlYm6BuFTHNO59sSOp5f0Q5L1gfxWyIk2VsMjneOJ4jy9Q9sF0fR1G264bLqwuePvcar3/7fejTE9L7V2jbsEg105YxIIwY2fGFbl9SJ/WP//E/xp/9s38Wp9Pp7vmjw/mDf/APYlkW/JW/8lfw6U9/Guu6vvE+67q+9fm786hRc/JI7BAWhx3TZ387/g52zhMRYkwDMFlGR6BHD38TT8QQuzeO74Mc2eG1wyl67UFBLvBsVoYSA9mZfO6c1scTlseTPZ4Xm8JbTGiWDgpQllH64o+IVEywsjcTsZTaoHuDXq/Qbcf22+/h+uo9XH77c7i99x726wV930YAkJeCsq5YH85IZQXnFRU7BIpTSVhLQm0J224btLu8DhMNtuWaEx6KPa7si7A5zh5nyM+x9qiDhIPySNZn/8Q5HgVmhhlqtXuoNHQv/BtURSOjSodrIced6WrmnaK3S70Gp/eXMHbas9BXVew9Yj1FIeawLsN3TbUL14xLPIYE2rVjMGUkVqzlhJw6knTb6/A5Vo6AmsAsI2d7ZCYkMSo6i5M5RlYSJ5tMrBWCGFIZDsJIFzCozQkWRDYLTCP78yDLRlQ4wcElnLqaSGrqGeywXSiFDxh1BGY01+qzDRRBpHWLedbUxRRIPNOOzIXJ1lQmgFUA6ZBeUaspvjAzJDQL9x0pmVBxqLoYskFu1BUMa+cIunnAw70aXN6bIA69e4CT1AIpkqnDR72j3irqZooT29MV9boNRxWOaBLEP/g2HZhClQAho3iT13ObSYFtUNRt9/Nrf5h8GsC+71jOK04vH7Cc16GQofHOgsP+YpcFS6NpPq5db0ZwaltF3Rr2W8O+d2e4ELoyGrJfxAxKBUgF7Qt8x7h9yZzUv//3/x6/9mu/hn/6T//pF3ztH/kjfwStNfzP//k/8Xt+z+/5Ij7lvllySBaN39Dd757fjo6KaTqbcPB3Dkrv/+oofxNF3/H8W6IQe+0bhz/hjmM6ZQWLQzOyKTSkkk1y5bSgjBHxecB9A9v27z+bgIGY76BO9e57Q72Gk6rAZYNsN+xPF99AT6jXK9ptg7aGkGtiHxWSy2QmlcpomVCyaScWv5s+IQ8FgeIw1ZoYpxCajUzqICl0J2UVGemoh5hREgn1+3jZvbBqgmUeqorMLtop4hmq7xeikQXM9oFwHuEczTkF3BR/TM+v72G5DRZpHP/IpA7XAzp/hRCtJYhr/UWKogpACISExIqSig3gk+7LjMYQRBsfb3ORhoafE0VMGNdYmaD7JUqH/6ify3DN3etZ5qTsDDg6aSzMCIo05l3Na9Nd+aWr/9uzLVE4w/aw1ueJv986OvdgZCWkAVnPwMW2oF3PRIREMVTTmtOtYb5Ztrdttt66ZeopMbRkrwVbc/dY6/bBcztmT1Hhwy67ANTH+hQNxEKHY+q1Ad16ofbrhrZV7NfNiA17NQcVch4RAL8F/XnjmUNQZeeDrCWjd0gzJqNR72lIsInCbMZiDed9bwMatEDXR7YMmNUOJiDa7A3KqSRDGdico+zNpcyi2d4yuq6Ebuqptm/I4D5im3DwYW5fMif14z/+4/jGb/xGfP3Xf/0XfO2v/MqvgJnx8Y9//Iv6DD08Hr9uAj4vZHf/1wdHRKF0fYx6718LTAcV9YowauGw3uYQ33abm++ZrD8RcnGK7FLMMa0F53ceUB5OeHj3Eeu7D1heWDc5j9HvNDPDwHRi8XfXwqsV29OOulXc3r8BzTIpuj5Bthu2z72H/f33sL//CnK7AHXHgg5mk1qJbClnNn0zbSDZkXTHQhWnJJAMtFLQWFDZ4JPMjMeSsZaMh2KjOgoDRborWUcmA0Q6yIfzGZFz6+LjKgS1zwmoySNkExS2rDTBMgiCoTUyjPCM1O0UWYYwP99qW3EdrRYj8C6YZ4HQwZ6Mv5/B0whahtearzv6rhiWyeEK3UvbMTOICpgZJTMyBMW/uapNlI16jEb9iifLq4DBAlQQOhq6yvAL6t/Hskvc9ROqGn29we4uFu80ciDrsKuj5m/nmkwIOCdTvCDgtC44LQuWnIc8EemhIZrf4qDi2nqWtUholhCaDzttDvtCFAWmul8yYyHLpEoCkvdC9tZGMNRaQ03VNB+ZsUQmxa4xSVHztSBwEZvyuxJsCGgyZENUkXNCiwbzLuhVcJMNrXUz6E/buFb7dUPddjz91vvYvXbT9h1aOwZtko6L5W7hjJ9dxdCgWgUU3ddBBxHQazK6va/ZLnPKc9Sn9ssN5bTi4n1VwTa0LEqHZiKBvEk52Xyu7HapzPKCdCNqoKll86lAiwDtjC4Nqh0g9SAxgYRw+XBm8ot3Uq9fv8Z//+//ffz7f/
yP/4Ff+ZVfwcc+9jF8zdd8DQCrGf3Mz/wM/uE//Idv/P1nPvMZ/Kf/9J/wrd/6rXj58iU+85nP4FOf+hT+3J/7c/jKr/zKL+pYInI5whYAvHZwaLw7GIm76PFtt7FABtYSn3YH30XkjfHq+AC9+939h9GdgQrsOgr4lg3x2Cgp25ykFCrhPjojr3lSWI9sPriD6pO9h5AjcSfV9ma01M1SdPQG1AZq1iHem0VE2vuIXnOKjvqCkrPNKkJEbwppO6TtoF6R0VFYsWYXCSUd/V4Ls8sRETIMfkLvwyCO7IQxBgPOcz0VE0I1oakO4pQASBCwEMBmyhhxLex8RE9YQMFRmD/WPgLqUWYrIh+aHnXWlD8/UKHThYVJ0VgovgiPq+z4jmPtRIwBy2asTMSwyXGMGDGoPiFayYKrGOSHw/oimDxRTorcCY1tfAT5+FuSo5M6OKoBIfjaOgZBNI+dEHvOAsTClhGeS7Hhgkx4WBecSsFa8miwJw24cNL9x2b1cxifw7ChmuVgr6N2yB7EJBgiAgYKCIl1tACZKr3pD1J3+SEnTyRmex9vpk/k8latI+eG1EwXUj3rSqpIxSnoAMA6IE8cDHxtYpC9079VBPttR98r9sttwu1dRvY9Q4SJ0JCvoPnVI+uSg5NSfxWjVxo0/IGr+OsEsM9rHZXZRJa93SNEnyP4HvBjZJUpoa0Gj6Z1KmzESJ3uUwVMT5HNIXHy1gVrSleBfaZ23NqXiDjxX/7Lf8G3fuu3jn9Hfekv/sW/iJ/4iZ8AAPz0T/80VBV/+k//6Tf+fl1X/PRP/zR++Id/GNu24Wu/9mvxqU996q5O9WFvPeoSgEXeGqMR+LCLYrfrGzDN2IOHm8INZEB4YwEcXhwRJKZBYTo4pqOHCmcZay5QDmBORk1eYA3xVU+rU+bplNZiEJ/DfQH1sTfmRRRuVW5X+t6rPfZuCg5NUPdmmdTesV93QEzxgVzGpLUG6Y6hs40JIaf65rLgtK5IpYAAg0+6oN2e0LcdqDckAVYCZEloQmjZ4ScyyZ7CjIWB5EyqQV7RUAAxaIKJIYeoPijQ1Zlj9rMeIChCCnHYZMPwvINm1OJmn5pfn4BonJoth+uVmC39ytahH5N0rX5ziE7w3NncPzGzNl9HEaT4cxFQqSqYMWpCgDHtrMajRowAQMimOMFmpIYAK2zi69FzmI0wx0YKLOpKBSLozGiqs5anNmIlHJXGOo7gJ6A2h0hNhmmSM9jPDQCcckJWa1y1pvOEh/MJS854XBZrVCdjTxqsyUNcdaqjzGs/JJLIzldktMan6dZXpTaMEGS9XZEVptgXAFQ7BAJtCqYO4QSRDiZG49lwPL6nN/umnCHSkZcCFcFyXqCLiTrbgEy4oIClnl1sn/XmDD6Nmqo1CPfWsF+3Abtrd9Fj/8Z0NBZ30a6ONWTTmmUorihgbDkiVOkgYnS2oPcuECKj8CsR0AU9J/Trhq24GK6z+w6gwmAwJs8ex6MHyrkYBV9VDZXpikQM4QR1WK/BdBdFFK1XgDsu9UvUzPvH//gf/4Jw1vd+7/fie7/3e9/6u2/4hm/AL/3SL32xH/vWWxOLxrsbAOaErF7z5owEo+SOCy6+iWNBfJ6vQc9+rc/+jYNTCqx30t791a69hqiDzOBozsAhizznMMPpXAPKUekQaUjSLSL0KNkgQp61lW5F4n69oe8V9XqzwX7OKFJR9KbY947e7OfYkMoEYbZFWxZgPSGz6X3lnFDKgrwsWF68RCo2klucHWh4ujWZriUhF0bGiq6E6n0iJMBChMxezI70J86rwpl0sE0Ia1CLLCtqHcFA6wq/R/OnRcl8wJ/EIRHxSPWoFRfU8IgaRa1RlAGLHH3zlaUgjanGBz3BZtqF6B9AURprgca1PDqpYYbJWHnMcy6VHSuZSr5H5FF/sxFWhKRufFwNHLif4xRMLNAooSCT6V0iMTQROplivB2QZbeJrL/ICFiK7BkNeK735O+VCU559/YCApTMAQmAc6zpnLCuK0pOOGVG0lnvtXPhSAIxNIgqkUWoBY3mFO07m+MxokZTI7eYi7bHAEkjEBDA6npBiHDYSVwDkcmy5hjRMtT8AeRkRllFUZbqWceOsmTo6kM7k+dASqCQ8djNIbXWbRxH1M+a9fv1vc4GdY9036hz+nHM2DfgPXF0IKYqx/qKbFw8Q2UwpeFwMRAmb6iuVjuqTYDUPeIIuIBcfsunLTiyICWjR33cYcPmTgoA6uZ1tlqnUozfmxNpqlrmuX2pnNT/S7coyDaNgmVM62Qbmx7VTod6ANimfp4hPVscx1jurX7s8AvFgRaq3pvivzHwxXbaSOUPIffgRxCN+3hP6IiURBjSGUX6EAUNOCeiS1JYxtBsA9Tbhu3piv22oVWH8JwF1rqPXRCT24H3R2hiSM5AKcDijY4AlsUmnZZlQTk/gFK2bv9una8xysEyQoZQQuICUUZuNmoDXZFUR+d+RINxQo20Ms/jMYrUz3fXCU0QETqJqSWwO6mIYiWYaWEgY0M/ez/DYV1uKnnTqNcMLaWz7DSyc3dab2RT8ebjR/uEYZDGfw+/J5pJv4ZM14Q4FX7NFc5KnPfIyNihvdkkGmuRhnORCIwA1xe01MmAxAn3GQQY7bhzZSea7Lnk2Upyh2gCz0ZjDrFayglrMbp/cWc/cFoNdOP4feL6z5N6pD2zv0dXOyomP1cUO2de0RlI0iFbt+wzjHDsz/v8zY9ELMhhhwQNpuxAS6BGSImQEmzqgtE2gKZjDEgfAw5ltn+IeqOv3hEmxl7A/Pm4B+Z3kvE4HVQEKwC6OlyuXt71kIDm+iFgNMXbd/S/CQdGIQwN5OhnYAs4lbsFa02gqVs9za9PrzZU1OpzPidP5aAcYnUzAaz15UPcPtJOKlSRjeqqUHTkzugOVSkROAcujyF7AvU+irByfptL83D7QE91vM3FNpwIPPLzTRyptl//w2YwCZhwNupNjG1vEFJII3DL4LYgPS7gNXsTnI56lhl+hdaKvu24vmdNuK8/9x5ur6+odTe2DTmVOa/gZKoRKTFyYUgBektIUqGnFXh8QPYIfykLcilIpSAtC4gMntFyA5Vi1ONWsdbd+ywSOhVIJ+w7UG/VuvdD8BLw7AcTTnp+3o/Y6Dx9I/MjOp5H//4aEXKHqEsDqWJrzWnrBkMkIhu94IZ9UKLjTrCRB6eC9cUJ63lFymb8pRq1tz5tJtsEnQoY9ysCw8iopY1HR6V3X8pIGhxQoBcPujPEwpAo4IoAcXeHRMmgKmXr5/L6pmUeZOwtsfErGXbeKcEUJeAyRyI2X8vX6OiVAdv1lfg+Bp8lIiMmeJ9ZcsIPDRqcZWyhFJ48kKKQtvIsNPp6PO4apJR5+vRuCyb/jMQ0pJpaJ3SCBati8lASArzugMT3l2WWURLwBgQKl0+j7gbPelUUQt2GlGar4fYroSZC4wYmRWJByhmcMpAf0GFBZd+7NdTWOhrSA/4z5pyM9YDxHee6iVBhuN3D9O/JNtXD2Tk8jrloTmj3a8sIKoW9rOtkYQpg2aTfi9sWE
rXaGqtn/QrA6PlIVtdSwNpCmos/79Uo8SJDS3B0cChAIqD+BQ0rgI+4k6KD0Vc1aAQEUO9YfCTzonluHgAgT5XFlr5tiEMUM/pgDmKidIh04/XPFzMUUIGYxgsU6pkzT5PiUa3d7IKbDRLf/LMeoe7suq0csHSn18LqWOxQw1hwDod1Qa0V+7bjdt1wvd7Q9t3oyZyQs4JSsc2VeQw0FCGkntCkmarzugw5lZxsA1JKoFzGec8EIDFWKKRX9H0bakVNGI0UqQk6Wc3OQ4NJPY0iObmjDQXx8Dx+nu/MMhNYyNU5ptEmiThWR40nBvxtPmupiyKRIrOd56M6vu+joQZvKJdlVGnNKE5SkdrRSzYnwhV1FH9lxD1xbY8OaTRj3jkp4FlabpsZJjkUJ/NuphOOGbmTD+LZQ2N4UM8JGGPXyWtYTAbTCQx9sPx/su2GcfTrIOp9aA7BRbYVmVRQ3QcEPdgKk8BkM8owAkNr1PbFIjrzB53OBIc9F0uC/HvHR0B9UKXSyIJ1JiCDsWmUAjtPevyfZyGqQT7C4dPGVrWMoLmkVQeUFYTdXDh1aCm2RxZGp4yODOmRxesh43nmVo5rJj5bZytNPD9bW45//eYSOh406YE0FgHewUEFONpUUdWnGcT6IdsLiWk4+lHzpnhvC6xmXVmiUOiHFXuboaxInOz8q0KYXIHmC98+0k4qGjJ3n1PSVXzENFCapaCrKozd5EUgIlP7PkQiR5NxB8EACAgm/NVYIr5hh30JfFiGrKY3gysAx4WHPwk4zzapjSZixKV1Uz4iLhuV3ZEZyMkotjnziF7HMu+uAh0O6nLD5XJB3WxkQUoZy0rISwdRwbLY4LflvEBgqTkKj5Q96vDknTZeiR9OhUpBkoZyXizFrxuaQxy4VUA7GgmYbLxEw4SwZJ46M6huYBjmtPmuauGQKFtPVWIzquKGV4ns0DDrFYAXsHvH1jr2blmV1SkZTdQEZnkWlsNJkXotELAO/bVgOa0op2Ld9buxtWpiHyWCYYjG2hmOSEc2NeAZWIQrUISShbrRYJHhXODHkyLNwMFhx1OI7iAMBxRqHiMm8lEsYVQARSbLlhIHT9B62mIFwo+J2Y1KrHno+MREsx/LnJNR3uHHMDeNDhZnGHwTCsaA4Y/akjNmfOYsDk8fWYFGIIHp5JF4786EVg9XAjSeUZ+1JRhywASDxtxAg6JORCY6K4IqAiWrf0GvYO0QVOSyIKUCnBMkFTRap1B/LIH4fs/ucZO4huMK4C1/cXxufKF5qg4vmT2TkaViBoYI5qgRc3YQenw6wbIlAkQJwSe1dFKGDQ0InHQ6UhJTWmeEt8tQdG/UNhUTgg16bOn/AVmkL/VtSQlLYnRNoG5f3JALQe0NREBtzRk4JttvysvdsimXE4lbRDLhqCScim+4O8IIEeAXCu6MbPH24WaI/ELyYWP5Zug+NLB3gJJCOCNnE44tOSHT1DwrpxXltODhnQc8vHiwCaOL9S1FhhjyO2QrbLAEOWWkYhBiShm5mBr0smQsJ2MMLucFYHMOlJPVsGqDVM8OBDBoiUZBPjGBaLHcr6+QWlGvF4g+odaGerui7g31VtGrQNokPexiE2JDVZs8qlp92N6aTV0hs06mJizTzGzzkpg8iiOYkrbMLGHov420wzekBulC5jV2CDAidLGuWLTa7HtUG72AZHN6AF8zREjrZhOCr7v9bWsIuZcRqIgbStWB/DnIY+V9N+KWPRmpJEkIt065LvIMgJTGHKZhkQ5EILOpOvzDrP/owYmFkQ+jiKGqfecW3OErzY+YBi9Gb3gtjCdd/O5NVKF9vnFIXE3H/Qzo/QJJQgSIFP8ja9cAETR1L2UpkmduGqokh3QsJiPHPrZ9E+0fPLLro3exw3U4UTugHaQdCR0Ubl4VqB29M3Zu2IWM2HG0GyOlYUdryTOR6cXUzy8c1p7P03zdcweu8+3NafmeuatNxkttdTZRG30i6j10gEat0nw0lBTddRo7xPsHD3W0gSrBqedW384pW7YpAkiCqlH9uwi4mf39cC7qI+6kEsVYC8P0d/fsAl9QOg2heihBhMmbBRC1DITnh71uiH8eLm5EvLHqGcbMIr9WEHLFdIWC5lRcmo/PYjlYDOuSOz5vpizmsJQMay+nxaSIzuasUinTQU2POO6cpkJFLhlhJHPKyDkhJ2u2TH7nZFkmgZG6HbuRLGRu7hheF0y3xEPOjIQhiSGtGglDrVnS9P4qzB+byQ1nsR/09KLYLmo9VUSErHHMvmk8M2DyugTsHyqROoXZmlnaIKQ4REg0IY6oZxiUZl8yYGN1tYTeO1pzoVOQNXH6Giq7qbzvr8vQZDNlagmvPjKCMHBHmE+BkZ1YzcBJN+LEAjLoEjhExITh5WYbs471CDUWJQjQDoSSexgTD2fG4936i7X+zFHFq+KYImof2+gI6Y1vdjCm843s5g5qONrYe7h/rScwCLg3Akg8e7k5Kva6vhlDJlMiPzrAezfxlv/R8e65LHvNbHqpcR1NT9AORUBTa1EsCLLZV+akxA/9AMz6xWQQ2WSAN2C8yEiPpyV0jY6w5MGx3TsoOjweruYBcrQ94IEbFEKzt4/VSvgEZ9bS7CvE4TEChnG5YOSdCICG3RuBmAe+ysjpy4A4UZIV9zSbwVR4HULFJVHM0IqHiJTdjAkZAwaW+djGTXZlCGMA2WjiBMa8FwBz0/cOqFgzZDcDI2IOUYigZONfmfOMjGDHlYgdMoApmpeM9XHF6fEB58cz0nkBJYYwI61GWHj82LsoDyfkhxN4yUC2UeWxsqgkJFlwfvHgY64Vp4cTWq1A604vZqxLRk5A1g6WDkgHyJr4pAG9KpoN3wWBrCic3fE5080K816raAmdCbjdvE2rYb/dDPqrHYD1YFBJkK7Ye8NFBLfWbCChG741WR/VYylYU8IpZZyKmGKFTz1lMhq1KJDVxTBF3MCqGym7Zos7ciVCTjaVtrmQp9m/CSkO4+FOqtWOWhv2fUdtDUvvBv0txR0/oa4L2t5t8B4xKm6uwdbc2dlxmTEel8nvnsnBHYE3XroozWgOpbH2YrNjZI8jfhjvGu2AYRl1ROIDThsGRrwWamvYalbT0GAcs47s72hDI/Mk0jdRubDeY8f4OT5E3xr/vrvRwZHT4VmMbxl2WURHkMbw7CcZG3VQtP1ngX1XUnYjmhDDDRNlEDGS6SSY0gS7WQw9QxG4rr87KDsQQQZRgpACVNApoXXCDsVVKzYxxQ6NrJNnLTm7PSFvDo61G44sDRjarhVE3MCb/SJlIzTEyX9+LiPDpdAjjF9EqcGCRXOq9u3EBcni+ndfE41seKMyjcbx2eyLmRwi2iDs2sQ6M0g26s1iNSpVNP0ygPuOkbWSMbYYNsmUkw0TTImRMhvXP/HAmq0BskM96STvVeLEQMnGUssuwc8RlYQB8E3dTWbFnBQA7VBNZjiZIcgQMNQ3h0VpDkuJbyYIlrWgLAXLecX6eML68ozycAblBM0JXAq4ZJSHM9JpAeVQK3ZHHBFnsgm9ZV1s0feOnBl9
tyGGLCbImbrJ5rcnE9psIkAuUCXsW0PbO+reHDo0CHIqXETfxTSiUNf8SsnJFaZOYcFhAlGByR8naFNsnMBq47ylNWPIeTBRyRZ3TRk92yYsafZ6xNaN5lHLki3DHROXyZxA8b8BMRILivccBYkhEQ0lg3BSwzmoOnQ3xykMdeuSkNcMVUE5LZDa0TZvzIyo0Xvy7phbMyU42gwcXjEzbNXBIAkuj/rvw3nQDKQxXuhPmPp/ZEHxax0OSaV7747P6wp4DHOtD4c2Dv0YoR8ztfh9GC2aj9PlDTsawcDxu+t4zX3O4x80nOI8U3DHOp0ZE/kUYIaIsfeEOkbuRGyOiZMz/JL3EbH/e+aDd1MExhHOTMRIBgYTW3OsadIFfLZJx60bmUsOiE8pGRlmk2ad+r5HKhwLeZ6mg74OQA3SHAQbNUqNBpwTGZs7qWMqFpl7ECoxIOj4duY8yCG8aHDXEawcwqz4u0AA4jPdGd1BxyPjimvptSzcn+MPun2knZQ5KKdNRm+Gux0K5YaS7mYuAQJUACQQNKjXkIgd/ioMPhVTGlgWp146awp+QXo3I9SSO6kESAJpRzQKIiVUSehKcGTIIqTkU6KUoR7FlnVBWResDyecXjzg9M4LLC8fwcV6lszwJ5SHEzhnkItcWl9D7Hyy7wyYonGyaG1dMvq+Qy8M+Fh4rXVAcrqtwN6AcoIQY99tqm1vYnAhWyaVFx8JkkPIFkBQYtUcFKcMzkZVz8sCo0YLOK8AZzAS0ARbrsie7Wq1Dvwmgt1rMbV1rLmb8wSsRuUQIA41A7v2Xi/xlCEagoksKxFYzLF3MQw+ySBvxIaa2QgO8K4THQ5OKrJszmxqA36upXXPGtuAO6U3aNMp36AUlfHDJn5mCIcRMChMvUlZ5WBsdMJk0dSt8YWn9YaDtgiYbmaJbvRat8GIdUc0hlqsM2uy8X6hghHj5CenNLyMZxfDG7qBDObhQdT34Kfn93DI77iWI0SfuZijGuO7RDARr4ezyARJE4RDKDW7JBJ7xsVI7pwSJ5A3u07YMBq37X2HEO4R7vMvI5StruSCtABjF2DrgmsVXJupoyhM+aOkhDMRNAMF2eWGnHP4zLnMHF+dyt2dtWjryMUlJruOHGo+nCty6Hy2ExxGnrhPgwdjtvZ1ypG5yLD4OoxhiLO2GkSbu6v5bD0fPiiCHZ3X7MvCScUteSS8kMl6KLMTC7KNszgVrKcFTNaICd2gwpA+IZOlJCMSrCuWly/AywJ6OHuvh0fsYrOXTGmhoW9w1WH18e9AWZKTFRZUITQlpC3I14qSfGBc34HeoL2ZM1gL1rVgOS1GlHg4gZYCKosN7uEEWjzDI59d40bsboEQjWmhtHRQy0hdUJtCtor++oJ6vZg0Cwi6rJD1AXR6BFKB+EAyECGf0sjwsovcDtkUEWgnQLozThmcFpTTA6TDGG8+VJHzAkoZnRNOTZBvFXxacL5u4JyxbxW324ZarQt/DwkZ31BrFxAx1pSwZiBTzP45rgJ3nBGFErzL3h1V8sbvNMd9DAMZhkfVCARkRiWHFEyaQyU5WdMyMkN7Ql4y8mLXTUTANRus6WPvm/eLaCNXBHGjNSxdOK1DuoT5ox4QnaMTil8quWM+vPBoNkgxRGsJ4k2kHa1t6L1jr5v37vRhGMMJDTMSpALKppTiI9kR84uG4fIrwWwO6ghZvcUgmbRPRPL2HmCPxolG4HB3jQe2NI1cvLN9mu2PnPJwOHNrmPFOlOw7ev0JkaXTdBIWgw1L7scnDh9G1u3N085+VSVUAfau2FvHdW/YXREnJ8aSsztGS9eJJ9ozkBz2mlgctyhEbDRL6w0qHb1Hw2xHo801OgEVFy4gAzXtivu6jgxQMM61eDab1EhI4nEUQOhRmiCCkM0rsy0TSh32aJz1t9D3KTJPWGAlao5WvfcP3nrzIW4ffSfla5ZhcBcCbjotSCXj9HhCWQvWcwGpQFpF2zOkJUhLBnMAyEtCWTPWU8H6sILXFfR4dj0rQndpj05Ak25aajT6EIdx5Gyfm5cVUAYrINxGlFySjejmKlAyHDhF05yn5zZkzaEzhx6tTsb3kV3cIp1WxRh5cby7EoXUjr7tqNcbWq3YFNC9QyrAwkDuQFpdJdkk+a0OlVxI0mFGHA07HEGyxcqpIJUFeVkhyTrsU1msoTNloHV0TtibwTDbVpGIBoTaqaO1A/tIjLZaxc5TEbKR688DOIeDolcnagDjV3CFAQXExziEsGwUx0dGxnCtMtdVTPzmwDd3iqNxNifkkr2ZFZDGkOaQrD8qdW+qdZ+i1u9l5/DZ9YxHevPpceLj/EPf6rRxuE4UUJU7JJOqaWbwxI9rAnqjMsFuyDllhDEe8/8IXlBX6NHg+Hmc6hfAyFWHc41KWvz9/Fq2n+d3vHc0/p8Bv88gbR57iJsCmfJ4j5mlBF/SG10HNDYd1GFjHeovk4Q1a3yO3Pjxz1ElMSXYGmUBq8eYUvn8XpFNMVuGzs4wDERAVUDCjtbYNFvqjM4EagyJkS2iGOMEBmxJA6qLlrR+cFLHuhLBxryIiGde3m8GgrhDEnVdTf87qwtGJoxxTeYliexrftbxM7+QvF7cPtJOaixNdhx3WawR9bRieecR5bTg/BWPKGvGshZI3yG1IiVFXRL2JUH2HQxgXVaczg9Yzw9YP/YO0rqCHx5jwCr22466G1utwSjut+2G3k1yKHFC5gQ+Mda0gNcz1pStaN9d5w0CthQDXTe0LlBxmAiCum9I9YxUO1gU0MlsAzjewmAk3/wKh63EO9tbQ7/eTBnhuqE+3dBvO/b3L6hPF2zvXXB7/YRWd4Mi8gZdKtJNwcuK/KDI55MLyjrTcCmjHgX4Ylf4WIKOvgukCaQTgAzmxab2dssmUikuUmtZHecOiMF43BXbsuOaC663DbU13LYd3SfwdgIqAbsKkjIKgKQBjxx9lD3BdGh0DkOmJt4axXZRWE+IOjTnHk+gxrRKjLyuViM8r1hOKxYPegwytpMQjsWGwWWszjIV6JCF2W87ep2CopFdjlEIYpt1zBRqfZhPO/hnEWo8p3r//NuSlfBjbmBJZeio9dbQekWrG5o7qu7kjQj6TDQigTlhQbEWCWKURKP1oWoHIOi9+aF5ozUnlOxZFSeou53evQrcPapWtaDPjbyxE31cBqxfbihxhA0MhzJwzDtQ0Ht0MtjVNwbRh931xnWPJlv/62MmFadPPYCY86JknFfxay+I/W39d/1wfUWMYUwA+jMhXfYSQ4zlMdWKyNoTQHEMUam0ALT3ZpOF9x1EQKvVAiyR8f7h9E1BX22sjbq8VmTzYz3BJZDsA20GlGVhDcCmaTSUW4+ijTVJCMcufp7TOJchR2cBiXgbgw4NQ1U1EtOHuH2knRQAM9ZkWVRyrbXlvOL8eEJ5WG0G05JR1gxp2WjSvaEWRl4StFaD+/KC5XzCsp5Qzga1ofCItMVntXSxhtnWG1qrttlrg3CGJEWtHSlbb4al8TYxN+oasluxets2tNsV9XYDQODSoKcLZFmhywp6rMgwiAACm9A
QESTJyPBiHAVaQ79tkFqxv35ygdkN7bpDtjakifZuA+yaEGpwTJsCtUOpg6MDETPSm597MIiH25B8aWJd9h0mB9PciQlAzUgbXRStiQ9HmzIvTDZ/yHo0ZjSafRx6zib0OYRT4Xbhg2z0iCZdFid2bRhsYNRABoOTaIwrKEtBWRYs62LO1QkjkfXE7KCoVUXWafbRHKGKIp92tNqQlmwjw2szyLjH+bIakdfEvck1wq8JbYGCEOLfSsMB312Ju5/JnxomV2ediiBWQ9UOsgs2s3ALfQz6FB/RwEDRDFWGYUv+rtLc8dnsIjCBkiuVFPY6ZYaoB1ktaive0+iTDGxmFwYiIbCeOOXZXj/AeZ2og/373jAjoCpMMhRnc7bDAUkok88GaxrnOLLbEHMNRtxcr/GBAV15auyAgKECJrxrdfJQYzeygxeVxPQAtXvfnq9LZrLar9fDR1Li5623DmZTbm+1AWyjMoYuoJ8I8ZYI6YquzcbbxGswiUMR0MU6IV9XARFWdYKGmhSbOKSYQS5iHyQQz8WPG5EtKAhFkKBZHMGAL3T7aDsp38DEphGWisnnrw8rTi/PWB5OeHjnwesGGdrNQLAqqqsIiDupkg2iK8uC9HACUkJPPMREe7e+mZi31Gv0ATXT2WNBT4JaO3IR16ZzZiHIseRuenJ1x3a9Yr9esV0vUBA4F8iyopcVWlakx8064YM4IU7I8EVAKVQa1Bb6vqNebJLu7b1XaNuO7bqhbzamQ24Vfe+onVCF0cWiPnSysQ+1A+SRvujAo4/CnmOD8xFSgEVLXafhrYruBIy6N1ATgDtSNZ3q2hXVFaJ7N4fORCgpI5PNm4rxGeSZUfaha8yusxbGNOADYI7TAB0a68mNxj28cK+XhkGQSSkbSWVZsCwLlnX1sSlpwDum7CEjYiaioQgdMkrqUWbddrTWkNaCulnW3G47tHX03dTppXVzzKQQ4WEk7asQcAgUyI991KHeui0m/HXcKuGoTIHC7qw2uNKcVB+/F4QjYAAZxECXbMcnbQQs2iu0N3skAiuBFrtOpTgrdFnQhc2AU/VsgByGsvErzWWr4Cy7Ykk7Ms0memOeHb7Ygdn3/FQQZl3SZrOV0eAOYGS63SHYgMXGGvH1Fxp7ockph3N7nHpLbDBHNGhbpcrIPQrT78xkUD+Fgrl0SCdItLz4mrZgaULtOCja27nrw4FVd1K5q8+QiqBaTSfRg88mBp/vjlDAjy0zIUtkruZTLFA0IlN3OFOUx/ieLmnYBSLLEDnWaEDsvl6tB3AOED2YjA/roz7aToq8TsI5FKuNgFDWgmUtWNZs7D6/05Kga0EqjF4fIT66GTBlA3N2xsjroti2DZvDfPvTxbKTy9WMTO1IQkM7jZRs1l4TM0DVSBYEhZAYvLZd8fTeK+zXC15/7hW22w377WZd3injRhlnYVw7oacF62NFbxiy+IJprDj7cEQG0Dtk27C9eh/75YLXv/XbqO6k1EV1k/eGIRUgnwEUAB0mbpqgozDqmzBYjYGXs82tAsGETNXGeBDDCrcEkwuCK37sDdXlmSIio8TeyKt4fduw1Yany83EX3sfKtwZrqgOIOeMzAmnJaMwoxAjWZpik1YPGnlRf7FisW8cqLeZeLH4oEAQm+TYA2SlNULEA6qKXjvq1rC93gaBwpQ0zNEo1AzFaUFaMpaH1dS/E4/gpl73McagXmzYXb1VtKsPwXtNaLvNeLIMJrJZjq7ZQyYb2UN8kfsMaj7Gl/IGY++JUzGnRNrA2pC1g9CRtXk0L6NOJMRWS+uKXhkNgpYcGlOF1B29NUjdRy2VNSFRwrIQyimjnFd0TcZIu5hzaiIQMrr2tTVs3ZhwgNUSVwVWAAvM4EXfUNRDI0g5GvfIlufZoHkaRA2NQEBtTh+PrLrPQCZqd9Jt6oAg6mZ6Z1iPx2PZCUG8MS5BsbIiw7TxTCEFoLZDIWgbgXoGic2osmnAnngkBtOClBhlMaics+VhIgLsFakLRIDldEbKC3JeZsDmkGOtFXrdjOhVu/dE6ZjSG7qXzD41IuDOAToIelfsvWPzbGsXwZITuiw4iWAVsfqvj52HIwt3A0tHhu6BLc399WFuH2knZQ4qjahj4LnBPrKwE2PZhscHQClDS7YxEke4QGEZR+u4XTds1w37bcd2uVqd57ZD9gZtVldhJUQD3Ci6+oXpbrybNtS6Y7ttuN2u2K43q7/sO3aPhFgI2CrotoMuG26XGxTstSGrCSkdnVRCigl10iF7Q9sr2lZRtx31tmO/7ca+AQFUHBqyGsEI9Y4LB6GSwbOgm+acq6PCRVCxVQ1m1d6dhkse7cmYBBwK5Eo01CZu245b67judWi5ZfIhdWwK5db0yKaQ4X0mmflANvC+DrmPto+G5vlzQETMQJB2JZ51lQ2WjtY7uHlD72ZKGnmMGGf0zcYR9NZnzSOz91AVcPFhkWpRe/IgqtWGlDP63sB5R2JGSwxxPUDx9oAB12EW9QdpJm7qRjp+HhDfwYHBMyNvCO1i1HPRPuAseEYVNCAD26amBZRsPEXAgr2NvaV99yys+joyZ0jozgT0mgsSegdSTUjVHL19omIXY3Tu3ZpJk38v612KMTxRsPdr5cZw+OGDr/Y/x6BLuxSTIQSu8HDMmgBEvUcGW1Hu11K814CKHcU4on9QwGExAsaoi7gupN1ZxUBvFQy4+LJJS2vL0Jzurn+sK8rZ3r8TmLsH1ZYhGgJwqJUqbF1uCbWbzJbB9vfNs4NRecgOvUnHTIMaxFmlA0KjeV8BLKmDu/Uf9tDzs8KvxwY03/9u78UnBTP5C98+0k6qnFaUZR0j1G32TwIxjLm0E+qNobrAirGLX/TV+wsCxzXmW90Nunt6Muf06nPv4/p0xXbbsF2uJknfmk2VVcGiJo1UyAqelJKN5VajX++3DcrAbb/htl1xeXqNp/few75dcXt6GnNXiBNYCHqtEL6haQalFetDhVRFWcxRBcMPbgxTSliKwz6toe4dtQraLmjVnEQ4qeRCpgwCOLsjmmoazMboK4uRBNaHFeW8eHZQxjmO4DQKv5yt850IaLcdlA2n7m7g61ZRa7XITny0She8rhW31vFU64hSC9tY854zFu8NI1UkBQqZWkZOCZwsygMwj6fJoAwPQ3OAJKNIfpBj8zqIzyODF/RbA+2MTTryvmOtO277jmVdcH242sBB9qZwtUwnrdnkqnJGXheUx9OYXBp9U31k1x37xUgU9bpje31FvWwAFPslW6TMO6SRRf9hOo6K0aGQfoDC4jZQQoRh7t6w67TztkP6jlo3SK+QvgNSQdKQXWKUgEFHjl4shmDRitwVqQWnTkF1s6b2tnuxPIH6YgYMDTkpSrYMq3cbB9Mqg3er1VQRXFvDtXXcvDaTiPGgGNduSclMnl938oGFAemF8wBw50zsNhU1kqjNuEo8sqLemgeUba6RA0M2mI8hrwYd6Hckcra+1DN0CMhFWe0bKhL6kFHqtUM7g9HRS4H2xZQjSkFnmw6sKRnjIQvGVNycPGjwQrIjP+W0ggiTheqj3F
ttuF6uBiWmK8q2WQbr6IOqDnhvQMGRpcaeUZN4urU2iC1NDTIkzJCmFLFpyeyOHhjZWlzFMYx0PPch0yh8xJ3U6fGM03IacAhnK1xL6wbJdW+ude9OnpIaS21SyE3KR7BvFft1x+X9C26XG55evcb16WZO6mb9CNrEB74ZUyx7ZA1OILamVpBHid1gkuttw/V6xdPlgsv1irpvlkE5gy8hAbCOdQhBm2VzjSv2ZbNBhsUzFbZBcnnJkJzAKINQy8mabpfz2b4rMcTFw0gZcKZPKOoDx3pecdLJguW8mNFdy2C0RXE02ISAGyl7G4fGYBlU9Smc3qgrXnuSblFX6wbVBfkgnAVg01GTR7yJCK1Z/450sXHyoYVIQd+1s2dOybKGIy3ZfzXzCt8nY6CgGpzRXG+t+3dJrSJvG5Z9x75XLEvBfrkhp4SS0hhPkUtGoRWpJEMeQ6Ejm5OyQEDd0HRIEZO7qn3UuZgZ9XIDAKuRMlltsIXhC5gPI2uyx5kpxve0itYxGxA0p5rXunt/1I5WN6g0aNvB2sDSkWyhILABhsEyxEBOipV8Gi+6ZyjWEKfSDEIEWV2tV/Rmn5H2grQXUFaIWm02zJtpa9p536Vj66YiaEMUOxZOyB6pJ7LaopJlGtEfOMzgIXOymot7Ew/S4vqzGKU7onzpzY4jBvTJnE4gGoSpGNZn59QmYbvqih5KZP54VJsPwsVAV8TGt+8ezLJiKPEbspNsqm1t0MQYUZV/yGA2OlEMnt2XpXjgaqr2rTYgM2o3osp6u41Wmtbg0kqmhh99hfMbRE167CBY/RBoIqDesYugdEH1O4jAYn1vIora28hAk9fkkp+b4aiObRef5/aRdlLLwwnresaYGePwlfSOtqlFo57GEzN4dQUJd1aGIbtcfe3YbxW3yw2X95/s8ZU9btuO220brK6FGYUJZTE9uex9DRSOyiHFLoImHdvN/v5yueJy8/lOrXufjOmGgTLGBCCxOkhni7a1CVJt7njIUv8u0CWPqZnMAOeMDGB5OFtmB7bIrSl0t54sERnrfjRphhjtUkzM1u95KUiLsRODr+p+AEN81m8RgfYmBwfV7R5F6mbsSGlzjLZ2K9J3wOj1ZMQKgoK7YmUzUNI7hK2pkJKTJ5iHUVZJRnUNHRcE/HIw7AfIYQzME8HeO2rvuLWGqgIhBe87Uk5YbhvqbcdSCur5hiVnLDmjONtwebBMXk4LApob0F/2cwdAMyA9QYqCc4LUDk7s0BRhd4X1vlcLcvwc2rkOWAbTaCGeP8IoOoKIUVcRc1KtV2x1Q2sbWttNacKEGpHRkbVb/5i/E/vaMIcBFAIWsnlcKZxfwH9qd9MTVPReLWPab1ZP3QtYAQX7upGRZQhkBAh7t/peYkXujJoEi0PFggPsRkHRp8HMHLAfIuBx2rdaKDV6c8go07bc1OBPp+Z3OewPGFOvO1TaugwKNTsrrsDHWsxNcNwQwzVSQK4+kdZEhN1JAVhytkCsFUh1sebaIDlhTu7FKGFE46+mZM3lOWE9T4KPEpmTSozabHTQ+nQBoOjVBjWKq+YYuSPYkOH2Kf4/GKXigV1zssjeBUXs3kRtPIfMKdJb7V4f606GgrORMT/rGQrwQbePtJN6+IqXOK1nk6TpHbsTIWoV67FgH2csCqIELoux0A6sUStmN1zee8L1vSdc33/Cq//zHrarOatt27HXim1vM933iaM1JUu1wTZSvRRgKabDs2TLvNQnXzaD4Fq1YqQIgV3vi/OKXBYs6xnLckJZFqwBHXaBqEVWouROltGrOREGkJeM5ZTBXrtiZvRasZ521Otu1HO9QbVB4KObQTPqLwXltBoF/+GM5XxCOS8HKSmYU1Qd7L8xIloE9bIZdPV0w3a5Ybtu3lPWRupynGeUoFh8pXYyaKTCO+2ZfGIsoSqwWRSBtTUAOmDamD8FODOJyIOI7lFrFLMPjgqeQaltpNoNb7/uFXvvuLSKvTczTDAIJaeE63rDmjP28wmnknEuBeti1HSoGGnnvPgMo5mdUgqI1HY6J48sE6MXayNQ8Uxqq0iL1Rea91a1vTmVvw+q9KC+9+79RhiRuq3pKJ57BtAbtrajtYrrvqG2G3rbIO0GaAP16mvCtRTJHBJz8uvEyCAUh61YAbTu2Uez3kOHFIPpVverwXKvPofbvqHcbkjrA0AJ0hO23fQhx3hxZ/c1kQg50CQ5NV3GNQOAoatzGOEyZlfdZc/ebEqAxkBEd1JRl/YFYUy+yPLFMoNgHVavT+6tjnObmVGIcM4mG5bD+JLlbIPGLzOTiqAoVFTgXyMB2L2WU1NyB66WmWtHWvKoj1FKoyaWsvdWLQYF5tOCsmQjiJWE0gX5nEGJbF6cCG5PV+ScsV9v6LVCarXPk5k1hdpIAqFkQwdOJWOPjMkDe8ugbE5bFRPvTbBuFoP01QUQGjIRMoA1WW9VcYWg/uXQJxVwVHe5mZhMq72B3ElxYvBW0NaKvDcvQPPArevNyAb7xQ3s5Yb9enPnZT0uZiT6zLqJXKDWjH1o9YXahd0TRgUXcKwpqkKuBehsQnbdu5yz1TVydqkncmPjNRTxGsHB8LWlGA00M5I3WSZX6dYIdKV6huclUZIRKQW2HdAZx8jvIztnUFvFRFRFLMp3g9n8HFofUB+O7IhJx2MUZxN5nSk+BzSH2FF08mBkuuPuePidsfG6CRhgpKFlBohlZ/PCAYeIPJxVZFRhoJqY2U7CaN2GEErvWJhBIkiCMYNKXGbI2GEHKIPuVQz8oA3uSHYsmgVpSZBuNS14EGBBRgOlCmkC2tt0Ui2acS2bHcbbr9WwgDoZaVMBwVogWhAmvLYa/+twRY6QAQto10kVKt3zH3LlBG9ADngNsIyhd3Sq2LcbOghdCdwB4gxoRmt2HY0Kr+N4A8YcGRN01ELs+x1XE8Z5Vr1/+jlRZmw/tXoJHfblgII9U+ie0TUR3DzDrhJOyq7xkhK69+/Ba2gx5sTfzT87mKTPCBhxLA4lSjAJW4MwGYtyN7JO2/ZJXMq2r+GOCkNKyferlwOM7EQAL1gfVygU55cPFgD2Diag7ozOGDX5QEdDQAAgZIcOl5TGOQImNBjnTMf3JQ8g5t5t4ahhjs/aFCaz8sPcPtJOKi8LuGQTKa2EvXb0WtF3G+vMI3pKoJSBdEPaOtLSHDsVc0rbjte/9QrX959wfX3B9f0n1L2i3mxMQ20dveuAmMLhKMgVChKoLOB1QXpYDebxoj96B8FELTMllFTASlASECUfRLiilAVlPWFZFqzL4qOWvb7WXV4lppsmG0zYPJPqe4GqjkgqpQTKjLQQenMGYqlGcEgeDXsZNliR5LDhEA9VQJsXfIHZG3bbfTpt1JS6OaitYn//gnbdIFuzIYQ6IYNQds7uTAiEZjvNMiYF+pB9ckdFhAZb91GsZcWEXNJBby2YmzyNlIQMUeugA73e3Zc1Fo+MymBGE6K1GpUx1Iz2u6eEBIL0YkQOH1UyFCT6QYh2zP25d/gBcSRnSqoqFlmsebUKyrIgl2LnuHZsV4OF227OKmjRv
TZbnzB2mNQDw280nEYdwUbXNDFYrXsTa8BuUHtenTQhfk1I7WS3ToAbFXJVUwGNelI4KnZHQwC0VXeKAG47UK6g5WqSWfkE0QQRMhagykFMFaNW5DlwmHwc3CAC5ntjMCDdZ8zTSWCMtLEQcU5jjgRMYNnBrXW8v+3YupF6tl5RezMnBSMbnHPGmrIJGGcbpgoFOERe/UPvaesDj/RHy+DGNQXQ6u7H3KGkyHWBAlj2HW07IS/rlH4LyTI2AlO8p4qOdpG8FnBirA8nEBFuT1espwWXV0/Ybxtury8OK1dvKp8RAStwYkLu1hlWWsdGHU2jZumVvrvAzJm5ZOUVYUFVr2OpneTMDLAN3/zyUJzwIrpFi95oW00BgqEeMSRQqqC0QTkjlQou2XFoxe1yxX7bcX3/Cbenq1HOnZG21Ya9NVTHowkz8g8ZEE4ZvBak84J8XpHW1QwQuSSJBLxoc2pKXsBI3hjH4z0o5QFhiIUgZkxFvQnYcHEFQNzcAQo2TkZCUEU/FeTFGppJFaGppwNKs4WtYj0bEUVGLalX623iYiM/rBJvx9N2y5T262YagHu1TErEqPnBWtstwyIc9PNEAdaZHTrRdZDU/Fiqw00YxdzIlOawxC6CBPZMViFhsJIOZ+W4iyvhB64oAMmoAwQjMTQTw5kO4+ebNbKQRoTau8FQ7riDdjt6yoZDmve7bEpd1VxtYHnKCVpsCy4PHd1VBlJKJpWVGNIErZYppdQ7Wq1ITgwCFG2zaalzPI8ebMfh2rvyN1KCaPKTb2wvUULzVlUWsVYENQhMoT5E0UyTxJ7zTOB5xoJogEWz9efEHWJBLslrQlNqJ3vwktn7DYNBOSBd3MGZA6Hw829ECoyMWg9/omptIOp1IISCA+K15nSbGBX+1hqeasWtNbzeN+y9okrD3nYAMejUepL2UpCEfHCnv6e7Uw21ekz0IEbCQO07p8ALnBnYe0OomQvBBGWZLFCpDcupmwbmYqN7eAG05IMUUtQiddSt8lLAifD4FY/Ia7Za6ZKx33akpaBXCzB79Wzdm+tVFKgN3E0gN5G1f4RjYSIsibGwkyLIapekdg2XzIBae0UcV1Pbd831Srt8OTgpX5jBwulRnK/NIjAGWu0gbgDvUI6+qjxS7OvT1ZpOX19HT1St1TOoZql/9/HrbD0ctvdpMO24FOTV6drrAsAlVxxOsgCFwJyRUgFRstoJT+JCjEFQF3HUZvWsFkoXjpkDVjglABDBzozey4AvxRUQiCjQLgB0MKTk0iY0IicRg5Fatag9bdb8HMagt4667di3iu3p5kK1cw6RKX6LLXZ3UpYr2UYRFs8eaRgXePFZGAbVYmY4Xno7sI5cZkjD+BpVWbpOB4PpYMg9DiUCCQF5qt1TUPGJRhY2HNXxOczREQE5Ni8Sd9XB5ItAIxxUqGpPmO/+0SA0I3wgA8mvaVmXoS1HCKYqW5Nzy274DSZLe7Wa6LZbsJPYUiq/5kcURf28mHBpgnICNFuvHEzNWoNI4AFDhxEGWBldBarkzGexNervLRJQz2E/DudowZUqWXwAax0AqgvU0tSDcweVmQ1uctWJuK4jCXkOD0XA6FnpVEexPwpIl/x6Cdn7izubcY4IzvI0iO/iTupSd3NSvaL2HUBMA7d10nTWzMbQzYOTsqOgGbDFIduSHOoORrcFRDrQFaIdHYrUm60BJxppV6RcoKKGGDMPWH1koIfsjYhM+k0ZZ1Fjm8bYn+sGSlYLrdfNs3VvkxCFNrvW7HXDxITUCI1l2IU1GYEskfc3hm1ypEGVkRs7vBxjlNRhZfYxkl/49tF2UiPNdZZYbZBWoe6khAkd+0ire/cNzTzYZrfLFbU2XJ+uuG1GN366bdhbx2XfLXpWW9Q5+hBMeRNpXZDPJ6wvH20G1MMJ5bRYwfC2uySeKxBTAuWCzMkLofBIkJA4gzl5f8MUIB39NU6BjaIpJzYJmWYOIWXXhWsd7bQYK8uHPgIY02EDQhnkge61G+nAxZwRJx5NwXnJUAKqU/P3m5EjIhAYdYRolhxF/eC40zCQYAwdNtIp9Kr+s/UE6ejPiegzoCA77ukw4JtitJxGQ6GIwx023M5we5uMLMmELql775Ia8aVJ9hEJJqDZJBnTTNV118yADmg0J6R1RT6dsL44Y3k8YX20a5/XMjQGRx0sjKedEfsv+wbNdmzaPXtxdmLozUHD0djLexfU247tabH6AhPa9Qaok0YiFVVYfQIZORX7XHWGZGfLWntCD1csYd9dlofZWF8+Sl0RzsfUsEdAITO4gAb0ZldPPehS9WCBTDYprmsmK6KfOKEmONPP1sziU5ozRfYx15rAmGR0FC6MtDiWAwSiHSJt/FocqldNEA9IjKSj2FWxieDaOp7ajlutuNQNte/GjpQKIkXmhCqMrOTMQAJghAZzVGF6j+CkncvjhNwEDMcFV0avTUHW3QvuzdRvuqCV6oKyDSkX5L0itxNy6+CSkHlBQpmZpE7ikKnxENZ3EnLvWF6ccHrnbGSx11e0zchV+3VDqw37dbf2l62h3qyfL+eE5ihVd2ISedCekwkOJw5tAEVKhLMyMmdAFZWB1jqodwvOYDX9D3v7aDspx52t+BjjBxxvt8qcRfu9QxqDagN1q3tYv4CrNNRmP3v2tLWOvTVsXkANodPk53XAJz6BNq+L34uxcSoByZQkNHqokoKLYbOOwpjhICC5tJLB1MaOsOF0iubfqeuMYOzPY1ijTxauydTYg+TA96cq+heCaXTHfiOrOQHmkGzUBQ+ySN3MQe3XHfVyM2iwNYTlGhCMn28zJs9Cesy6wx0gRgSOiBoHQznu5AaKpoMdbguYVnyeGw1Ib0BdGNAQs5Fm2DeYwsbMg4ySDDYMncky2u61zeQjFKJhknMCl2yq+6tl0FwMTiEnsNDROR3qUn7UBzhrZltmaBicNBbJyIBBQOpizerSLSCqzSY3e4Ah4g3c5DkZWaM2VKGc0ZOrSXRXuOAE65I1CJJgsE3m5FmDaxbGufUvMaDiuMfa1DmNyoqPxmAlHzTIMTSRCEQW/CVmFFYUde0WYnNQw1F6oBJZwvF+tAXP/j0L+rEHLKsy+tCx5vX8u8ClkIJZKF5rwsiuR8IWn0066l8zHJwwMuLsEibkPJAC+4voz4olzWrsZPYqWiMrEygzNDGUCaWb8S+HfRYkJxu/Yuc6ZVeOSclU/msHEqNtC+q6I68FbW9IZTfVmlJBidD3BiJFqsmUUWozOFHV4fwQp7U9Gd/J1NId9pPAJQ5wO/me/RC3j7iT6pDOaK2OyajiHeRhBJTFpeK7iVuSN7u1ZrDetg2Ya9srrnvF07Zj7x3X5pI9MKM2uu+TyeqX02LjHB7OWB/PWM6rDcOjZsX6ZQE3AZ9Wm/RbMjiF4QjVAoXszg4LokJkJWpOqvfJ4iLAOtIHNm/wFzG7ynuaG9MNoKo1w/ZuVPju2Zp0GX7E6M0J9MSjaZkdT95vO3aXWjLihP3tMAqzAGLvFbHkHQ50vN/fImuKZj8OWBOzQC5iiFb1Xc4Uc3z07h2J
BMIK7gwUg/eMTWTvk1K2mojXMTJbPWEV0yTbneW3RR0wjoUIaykoxSYPWxa9YnnxgOXFGeuLM8rDYXrxwVFNBzXSKd+fhyOPNPEQCKUlWgSS1QjJ2FlpsQxxv2zmZN+/ILQM1WtIAIMkvnMZ0jtEhA6GZCdBaB/yR6xGFV7YxqgszFg4HPh0RurnXUXRyckZgGdSDNYYy17AXJBSQckrUsooKYN8sGaC9UQtyUanUKCKbE5qTYwlxfSnqLfYflSxrHjK8IQT8wwqmHNeqCNYlm1ryYNYdZSBnAA1YNmZBIfDiYCKECQgNjKdv2Z6KMV0lRSfPAx5OK273aD2CdpHOmvfhdlrPDAYUBRUM1ITJBVkEeTHkylojH4qg+6h3huazVnyUpy4BBQ5QbtifflgNfzNSGJtb7g9bai3iu26YXt9Rdsr9teLMZ23HXLbnSjUwerQuaMQCgmBOJRMNqNOjTTWGtAci47+LDpu3M9z+0g7qdvthoZmJ3Tb0Fo1lQkRq/dgrLtYb/YfUVsQXYzYL5OZFf0MIbBpE0mBhROWlGYzZzGqeC6mLBDTWyknqGJMtI0FN1hQbrgSkTuDjqo3dETzpgBdRw1qO8B9gDVZLjojF072fchT8OkGbMEOgdNqNadefURGN0pxRG1hL3ttaOzD15Jh3m3bIVuFumZhnDuNTAoR/cUmjVPtP3shPeAz33MQwaEn5UA3ZgKF6SQ3kmz9F6IwTBswBW+YAUs0a2DJj6uTBRdICTHtdNQHQn3EF0cXMejCHROPkfbdZvDAmz/Vu+zV7p1hygAjUo3MLRbcwWAdEsCAz0yayxXkR+BgBpnZ1tRRYkmaBQ6lC5bHE0QE6+PZ4T43VJUg0VcW/yUAkkCcQUmBXjxcbzZeHbDhemQEl5wySmKUVACyeqKpYfu68mDK1oGMOmTUXsEJKS/G6EsLSlm8JmvDE0P7jklR2Fo52B/Ji/SWTdFA9YNgYOfNnRU5MQLqauztbphjlz6DHRaQuJMCAeielVutqaSENWecy2K/leaOFBBxO5AKzmXFQ1mwpoLCRkZhMoajijtUmO2ILDlg74EmPEMZEMC1L5nEw/NagIkObaZS00DI2coO+25EsN6sTw0Kr9EByILkjjj69iiZELb1XTHE58XltaDXjnxaUbeK9VaxPZ7MST2eUG+bBfSjJr07rG+ZOYhGdkdOVmIIsut7mkyXpeAWN+iXh+LEdqvo6Ki7qUv31kBdZnRNEce4o4pz4g2iGpYyrGNAV3A4BhY9JWYUl8MpKSHH3cdHJFe85tFvZEXKclpmVOSGejop+Cj6Bt0a4MoLMedmNhY6Bd2jP9OUtSiFOaLvg5NSNxUazjHGSvShADEg0S7ju2p3yKF1CDcf0GZOSlxzTg4OKv42MqigPY++CT/Vw1mFw9JDbUlDasVVsWEXzTIpNkflsIHIrIO0gAKd+kwACluxPfu1hgDcre4Fj/ZAAbEwQrSZ3JkktnNqfVDWM4RuDjGg1ui4r4d7gzlDNabHEEMbcMYhedT54+Hc+DkJ+PWYTXFk7T4GxCNWkakOIq2jPKyQ7qNjXKDWxHX9PGpyQ+W9ciqA1wusdynOvH+FZBlnjLiAk3oI8AmvZpxsTcYcqpmxmIBxdgdVkLI/pgTm7IELwYblWW2j+P5Ur4ElDrWLSaC4g/kOrMOR8biDkhDR9cAw+u/Y61kjk5LomTToKjNjYcYpGbmktgyCzUGTZNT1NRWcc8EpFyzuyBPPbC9miUTujOGcvCb1bA3crYXnz3kQIzGOpzUgKbQStGZort7H2XzfJoC6jcYgsvE+ATtGc3mePaLWTJ6QstW1pXdv7G+op4a8Wq07LwX1uqHebthz9mkQyVCr1qHS3d7yYFzHv5OqjyIRiNuTsANTquPz3z7STuq9//MeFmRcXz+hbdWmmjrmGWrVKYrdkZtrSKTYndWw3wRCoYQ1Kc6524JNCcnnGT0uBWvJeFhXrEtBKRYVDlV1fz+CKRUsa7FNfVpwfjzdWyRVaG1o1x1VbmhMPvRO5kTXZo2El716D49YoZJ90ZGAiaHZ3vYIHYhTk3tv2C83g+suNhLC2HfdPcVURbYIWSA1oTscabWKyITCmh6M6CEadL8ws6dBwQ3D4qKTYplT69ajdGs2XnuXqSqQWJFUkZVtPpGnw8PJSTdYR6Yg6poSSmKcckYBkBmzcVMBsMMfaVKbk4/dsPKlwYzSJ7mkiuBSd9xq814ioOwZ676jZ8alNeSXZ2jJyOeC0/5gkkddwMlrQ0GQoEij7k3UPePRISen6ltmzqAYNcM0xqUAivM7D0jZ6OpGLzYiSL1tqBfyxl9zBCQAxLIogiIXYxMyAdIztFck2c3QMiCpmCzPcvKMztZ6JkJyR069QWtFkg7U3eMVNYfEGbmcrNCfF2e1MkDZlh6pGVJW5GSwUUYYOBrQ2hyT5wFVZOWAkzJsrSvExXOb9Ur2ao6fLKYnMFS7x6d9ZrLkWSsUCxOoZEBXPLSElYG9F+y+3pgIayp4LCvOpeDFckJhwpLIVOLVg79AFjxSnlBfOC8dWNdA5hEYxHFH+RpxGLOFDiEL0DvgtWFDSJrZOABcaL4TMQKXJBdiHFrFZCw7SgAXq4Hnkwx1/1YfID5mpnnf6Pb6irZV3F7b6KK+VbRarU4ViI7bMhUBajWRAZ8fJp0gbNeR5cN5qQ/py+z26U9/Gt/0Td+Ely9f4uMf/zi+8zu/E7/2a79295rb7YZPfvKT+B2/43fgxYsX+O7v/m785m/+5t1rfv3Xfx3f8R3fgYeHB3z84x/HD/7gD5o67xd5268b9pv35rQ+jbXXYu5BTxqQVBjNWCHkyzh6AZaUccoZp1xwLh45lWIwX0oG8+XsRu5AZj04KiYb7VCKja5fTgXracESDs4NBCGi5+kIxiRg1Tvqc6gixBRTGQucQD5jiFSd6WjqEK02a7xtzYeiybwfIlPbYLPRUw6/n6dwRoLH+5SEiUfHLHTSgAej8NmjQWg6sqquU51cDsenz95rNuLqUIloMiV0RjY3srhjljwWCkYjME3DqICrUJjsy9Yqtlpx23dc9x2XbcPT9Yqn6xWXpxuulytulw11s8h21PtksjQ1zsddngnECJRoqKZ8GD/jyvPxmjESJNmAz3wqKOfF2YVWF1seTljOq5E5op8m5q6lqW6SUjYHkhbkVJBzAacCSgXKBcIZnTKECzQVIK8gv3NewXkB5xUpL2OeUc4G7TEXhxVdy5Kc6UdsjcDkvVbumIPQko53itaAGQDGWBGD/WwulkgzNfd2uPfdoD9pDhHIWI/Hu10Tg6sYRuvOZNTqNSecS8HDsuDFsuBxWfFiOeFxWfGwrDiVBWspWHJBiYwxzit7FhpQIB9mskXbAoXe50Hx5dhrF4GZbyzF3GTxHlHfHmYufhewsyM80d4ygiEcAgGXIjuOmknea2li0yvWF2ecXpxxfucR5694gfNXvsDjV77Ew1e+xPkrX9pz777A+s4jlpcPKI9nlPMJ+bQihVD1Uly2rRjhLNvE5g9z+6IyqX/37/4
dPvnJT+Kbvumb0FrD3/pbfwvf9m3fhl/91V/F4+MjAOBTn/oUfv7nfx4/8zM/g3fffRff933fh+/6ru/Cf/yP/xEA0HvHd3zHd+ATn/gEfvEXfxGf/exn8Rf+wl9AKQV/7+/9vS/mcHB59WQR316B7kPFPL2OiyHDlHomfnAGRns2w2TOSQFk0EKjQMyeSa1LRk4J67JgXQqWpTjc59FRQDXep8TAGFQGpLGQpIqP8ahoqpYu+x0HByHdmneryFBEEG/MjV6dORId999vb0NRoXom1TYjPGi1zxkb12/q8Jq0jp6SNfElvdswgJ1fpcMsmIOjN2gzCMv2ruEcQvZfXPWgq2VQ4Zia91EANGrQd05wvOV8vyY6oCp2aGNJ4syi+Hz4JFTDk+aIBxqbnsEu/DkbGWtr2GrDdd/xettQfUJzYkbJGcqErTWczmdwZpRS8OKdRyQQ2nm1zJIITPbZpuAQWZAfTqIxjyyteWRJUQRPq0HJIVRL0QztdbpVTjZnTBTZmYWqgv1io2naXlF9FhZcTV6bOYZEbDVJTug9QXqFtmQNvBC0lEGckJORftKyGEsVZCoR0pF6Q+EM7h3EGcdpykRsAzZTBlKGkI+0JBd1BhkDVR1N1MiJDznEQT2DNOBL9zHopoovgIjVclu7DcV3GZl8Gr1xz0G1yF5sh9oT7FJq3TOkJtlaHmD1zcIZp7KgpIyHsiBGukEN9oL0kfngjU+MJXyPQMTr6NnvhtHw7NrwT+vzzMWmjedi078j0OGS3ClkZxvbbLMR7ByOafSXuUJKSHaxw4FJTclmEbcLreO0na1v0nurol1Fejfb5mWBUE1p2wbeNoOh2SBC2pO//5egmfdf/It/cffvn/iJn8DHP/5x/PIv/zL+2B/7Y3jvvffw4z/+4/ipn/op/Ik/8ScAAP/kn/wT/L7f9/vwS7/0S/jmb/5m/Mt/+S/xq7/6q/jX//pf46u+6qvwh/7QH8KP/uiP4m/8jb+BH/7hH8ayLB/6eOptRz4QBwIcEEcHFAptMiTxmd3AtclsC80pqz0lLB7dRBQeEdBSzPuvpxXrumJZFyyLsZWS479ofRj0qHV4xj+ppr0DrdlU4M3SZevv6sM4xValuV0x3m2gbfE5MwplANTVFCBEbZ7TbUe7ufS/Q4kYgqTPNwu5KGkHtWaRmGMDkXFM2rQOaGbQclXd7pPXF+xlRud14oPAJXq8vnH4fPIuigF9IQrOluXaObAakHj2ExFxSU5ZDtLHiBQPkWZE08PhAoF9EGCbXASsXns80KA7DK4UAK131Fqxbwm3pwuu76+4nk64vbogg3A+r1azc1iME5t8V0CXoYIBMxQMttErbkgiy41sipJHx15YNWFiGsXv2Q/DgCr2hxs48Rh8uT9d0WqzmkRt0NagrXpdMiH1BOkFPZmTInQMNf+8QFOCcEboKTaBTa9WhsBUt42w5/vQg0RNxeskM6NCXGO/HFbGUV9HByelEfQBcyZUHZCiaPJAAFaLko7Wbuhi5Im4wExBKPLJt4whJcREPuTYDYZhog4R2rUpXOx4fV2VZNlTzhlrzqMhV3uFDTQkc67g41c87NvjivegL05HkIv8fPiiBJw6zutqrOLzGcvDiuW04rRmLEtCKYyymIMqDwt4KSjnFWnN4JJH0H70gbZnD/blsC9GqKkWeI7svRgNvpyKN/GbGoZ0b+qv3th/s76u/XqzetZekZ6uU1pNOlq+t28fdPv/qyb13nvvAQA+9rGPAQB++Zd/GbVW/Mk/+SfHa37v7/29+Jqv+Rp85jOfwTd/8zfjM5/5DP7AH/gD+Kqv+qrxmj/1p/4U/upf/av4b//tv+EP/+E//MbnbJux9+L26tUrAEY86OrG2VeWktdXvJlUvTirnYZ8SKhB3EU0xDYYUPVu4XAU5F0ANh+ZfaGdZW/qYzano4p0YE6gdEfW+iBNyN4mIWECycO4TiT7uIXnT1M1gQZxImoRfZtQ372+3MFJRZFT1aNSAQXcJ+7dh+O9f4zwLyJAUxGY3ztweTMBk5l3hPpiE89zNB0UXHkg+jH8zNjoDjXZq6hjDTXqZ9AdjffWcd7cMgIuEhs1IFKH0lR9IvAReoragp2rwO2tY3/HftmwP1lhuV63YRDN6Ri7LhrJCYy7AjvRGCgJ5EEKYHdOIwoe60rAYJ9XxV57sXPZap11sZKdbaqgvY5iubCxsCAd2gncGdJt9hXU9PQovkA2pp5QGtesqovWqq1KJQZxQdSP4jmkDOUE5ezZn2eBcSUjC9fxjF+eyJDt39oVQPe6j60YlbnGRKr3SdpsK5scPKFcglj2R+rXhcZ9gq9BNVdw4C9sa1CJQVGTSwb7hz1gmDK/SfTCSAuKkUkBZvwD9JhbOLLiaahFYrxFnBAPTrJBtnk1uGw5L1hPBWXNKJmRzY8hZUIqzgb1O+djYznGQdzVv+6yN8xzN570thYFJBvdPZU0ZNF6jckI3dUxrOG87xW8+HSGfQfI+67YFHcyvgSZ1PEmIvjrf/2v44/+0T+K3//7fz8A4Dd+4zewLAu+4iu+4u61X/VVX4Xf+I3fGK85Oqj4ffzubbdPf/rT+JEf+ZE3j6F3KIkrkfv5FTHsW42KLpqMBdZdKw8YNRm7hTNwg69GZwZwiMTJ0uuSHYf2eUIO66HZYLHeO9DSzKTIcjsvF9lYi1tF302MdX+yKENrMyFa9fiLCIUYysCazMQTGUw1qbmM7Hh38swBiin62jq224Z2MyV3aXXUnKL3y5KKgCfVfIMoqHV0YvTcwUmHynIQDxQ4GIlpbNUjQDMLzs7zrS8wckQXRVWMIEHsEsSVGA7XMlgjNyQ2nbDpKCKjNKMJ1dG9H9N9UwQHgeMfnRbGXrRr5c6NkuLEjJxMZSQlg7cKDNq71Toc9jllLMRITaC3ivb6iutvvwY3mzfWHlfUhxXLi9Ui0PNisIvLcsU05FEjYEKi5M3GbjBDPsihvnGeEgPstHsR008ryeZ/lYS2VSwvTtivO7bLhuVzr1FvO66vnqzQvduYBpXuqiUGVWn30fXah8xNYgsYBBg6ltfaxjqy62S1rmHcktdbsimpELk+JVk2EiYxxItHsKjhsNTYrm2HdJucjCaANINPoWCeQZsg5llt3lbRDUYlNhYsEZi7zcJiYMmzBiTde6q0mnPrzSF7IFGx7DpnpGX1Wl5B8QC1MHkN2DM8USNyqDg8eQjq2AMUP0/sfVkREKv6bCsRZ+XZtU/Fpz0vBacXD/Z4XpAToSTCIjtSU9CewL0gY0FZs0F+pzIU1BHn2Z1NIEXjOd+HI1jyelfYx7ilUNgQH2evzpZUZ6h2CyCsb7Wj3Sr2m0GD1/cvPnFiQ68dfDm91d4/v/1fO6lPfvKT+K//9b/iP/yH//B/+xYf+vZDP/RD+IEf+IHx71evXuGrv/qrR2oc7A+JbALzuYhKBOI9I0Aw0BxbGtnACLbDoB2c1OyqttepN9wqmdQM1KRskBzyGpG/s8W8zlQ9Da7bburDB5iP4f0iZJBEVqO+K+aCCY0zg7YO0T3gPT
c+RbS1A228e63LxFJjlHUgX7YhFSzm5EkYHOQKoiglzGg+qNbqiP5dXcrrfBrZ1oxWHVQZny/jd3ENwrPTuA4z+gxIlse1sGROPXFzsVLMwxsbLN4PEbjOwnEEFMHECrhxLWU0U2vvKGwzhOL3S85YUsZChKyWIffbjpYY++srSPy8qyAtTu9di6tUiOnoZcWYtkw8nKl9n8N5PzhaX76eAVo9TU2W3dZ5F6+F6ojAtauJKoui5g2UE/rmTdvJR5uIALqYO1LrX2IoMkJmZxJpYrauENlImpSQltW+S5rFeg4n5f1R0cQeRrJH31M8obEPFZCOvhN6I0A3+45UPQhSH+oZK5/RYNI8hJmZgRTGgrVWhRBBTSM7ZcCFoZzC482/CsDaFFLxqdWn1acrLygpWYYNy0ZFXYZNxWSYNDK3A/U8kU3RzcXqSikyS5oEpd6MIecNysSMsi4o62LQ3oszck5YCiNpB0uH7pY9Smbow9nHFPk2OmbfvkePAcFgSj4P2A89psBcd/YPg5SRIhN2WTOFTRrwkgqIwLl7JshINVuv3d5c2LajH2UyPs/t/8pJfd/3fR/+2T/7Z/iFX/gF/M7f+TvH85/4xCew7zs+97nP3WVTv/mbv4lPfOIT4zX/+T//57v3C/ZfvOb5bV2tDvT8FjOGhpMRGd3gI4MdjXuMrh2B9VJEEwO2MuxgRtk0sysKvTG14nrv0EaQvQ2yA7te24RmDhm2mthmq81IDHvD/uQTemu1hjjPBghmADInAIoTCIkExb8QM2FNaWibJc9c4Iakqw6Jp7bv5qwC/guGYGQy/j1tHIVBIegCcscrzRd88s3GNg0UsMV4nCM07IL/TGxzY1hhzaI6VQs6+byZAQ3hzkmpO42hcnB8z2StAVan8r8I6NKjwlndwHGnjZ/vnnIK2YAISZB8OGEmsrEcRE6kqMPZppSQOeGcksn51I7+dMMugisR+nVBfVpQb5uxpR5XlNOKvBbk8+pKEiarxSmZOoYf4x08M45xHjjBHRXbQk6w2mTIJ/VuBfNyNtowpeyCoslaEq4b2jV7sbuP9WfCr+pEBu839PXeW0OEFJ0Jnews27icgvL4AinnUagn9kzqwCYM4VjblkaiGTDYDObNSEvDvhW07QbiCtkZmk37jQHkHHqEALGAq0I6W615Xn3f0wpmMbmnRFiWYB6yOSbAHVSD6NT6S5mwrDYQtDycTbkjLwMCJhFHKCqEBB0NvVeQGpRlgyNNQ5IygU8LeD3ZXKjiTdIKPw8KkjkE0pRtbOLu+nDCej7h9HhCZiBJg14u0HqD7Bs0JfvMs7HprMH2fs3bmTDnMRinMgdohoYmcZ79XJEFHjcNpp08woSqXpvzRuGUTYs0r4J2MmZxPq+eXflk8tcfzkt9UU5KVfH93//9+Nmf/Vn823/7b/G1X/u1d7//xm/8RpRS8G/+zb/Bd3/3dwMAfu3Xfg2//uu/jm/5lm8BAHzLt3wL/u7f/bv43//7f+PjH/84AOBf/at/hXfeeQdf93Vf98UcDrZuUVLUDHKaRfCA/w4HH7/xR7sAATmI1wFgQQIAHfAfDedkdbBGNCjaNnrdqb1E3qsSM148i+vWz1BrM8q8P0Ytio4m3rOApGG0GCVZHcd+R1g4DQVis7FmnENWRWofyhAxJ6aHAxNxuM37iMgMUlE2kUiyfgfuhi0TutUl3Hgqs+u0ySi0jtDeH4loSBsNCRnSmQGFI4KPjR/ZTtSivG8MsKZRkPdZ2C2TQ4/RT0SEYPmN7A4KvLHB4sof/4UBZwacaDXN7LVORYaipYSaMobCtAckS0pYiEwdvzZ0AnayYXWpJJTLdTip7NTw8nCy504rymlxh7I6FJjG0YGfreHjbXxFGhlVaBSyJDOQ2RhgRDTG3G9PC8plwy2K2FtDTE0uxWqsKdNokqZuzaJt21ATQUpCjgm9THh48QLL6YSXX/Eu8rKgnFarQyQGlcikbEwNhZNyODNUVJ6XRKy+27DfLqjbDZfPFbRtQ79eXJgVKLnYCeiK7WlB3W6gpEZBr21kDFFPomQKMGU94fzy0fZpzuDbBXtNqLpDM6DV+ytzwfnxJU4vXmB9fMT6+BIpG0U/sjXZdvS6Q6kjyQJBB9oGFkERxUIdmRUlA3xKSC9OSO+8A1pX8Lr6mJDpsJszSNVh1JQSTg8nrOcVy3nFUpI5stfvo6Gj1w39ejXB41KgrVrvoxenI9seke8B6UAQXilg+mGu3lxzx4j77a84ODNjCzraOnr7VIxhKF3QTzYpWD4kR+6LclKf/OQn8VM/9VP4uZ/7Obx8+XLUkN59912cz2e8++67+J7v+R78wA/8AD72sY/hnXfewfd///fjW77lW/DN3/zNAIBv+7Zvw9d93dfhz//5P48f+7Efw2/8xm/gb//tv41PfvKTb82WPt8tuv/Fo+pQc+Dph465P8JBHYv0AUXJ85AuIoaI8NX7l7pPpY36SzIiAidL71nMGI8+Nc+iejNGWNvqmHt1nIoZqXZ8MhBOyuixQRBgooNcDA1W8+wliZrbhFDCsM4eI+9F8s80hpJvalaEQKXdY17PLPLf9XE8c1DwUzhqQCMTZTD6yFg8CB7ZbOD25sDsunQ3nipACnUCsk09o737LaMeWIxo+ghVvOUWNTRjSQYlV32YpWWOlDM6bIy6AoOAYAr21uBq5JkObUDdCNw6Wmb01sAlo/WOvHektWLtYpCHNw4XAGmxQCBYfHRYDM+/grHp/Rz6tbEwlk1IQmYQZgZCnUzRETUjgTrjz4RmSWFDM5lRio0SIQLQGnqroMQoraFBkdtu140T1hcPOD2c8fCV7zgsdXI6fXIn5dAfGVPUIHdbq5HNTx9lYsNQgfaGcs2o/7/2vi3mtquq/zfmXGvt/Z3v3IDSG1IEgxIEGkVpGmNiQtNLiEHlAUkfijESa3lQ0QcfpMYXvCSaaGp9o/qCygMaGySpQEvQUhUxKpiGmiqglMbyLz0953x7rzXn+D+My5xr7f1dDtSe8+EaJ9/59rf32mvN6/iNMea4rDogiCvzcKGD1YnrmlYFswRERrgYwWmN1Ld63qYZKZiA2IBih3YpINrtLsUU2TbIMQNrwqpfeFGkGCOatsVCY4OWu7tYnjqpcWQNzFY+REK/JjRpjTi0ooUNDWJiNENCAzhIxTai2WnRnNxB2NlBWC4FpAhe4WCwDBmMEUh1S3GUaAjgoUe/F+V7rKnNMo0cNcwqUZvpJOuG8D8mPe9UoHIrEhWeY6/JN9gUqDZ30vhlULOx7JPMsk9yzgit1LRrtfzJYXRJIHX//fcDAH7kR35k9P4HP/hBvPvd7wYA/O7v/i5CCHjHO96B1WqF2267DX/wB3/g18YY8eCDD+Luu+/GzTffjN3dXdx111349V//9UtpCgDgG6s11lnS+jfBgtaEqZvRpx7XuuaPMBtN3ZcY/WDu3wyKAnqIoewg9b7LLDEZpGY0sb1HN/dF1aTINCmWZLYSGS6payQru3kqQZiTgqVJNDEQIrSSLSniKuO3QEcDqfr8zQCrO
s3RjzS2iKEBwZoDz0xd0hCkLLE94g1ZpU/SdhBBPMMIkydoV1zED34O0Ig3CLosZwTm5Zf1gNgOlN1OzxrMnCSrhJ2P5SSJQGMGcsyaV03BreJ21i4/E56Afzn7EFGzNtW663OjY5kZMTMSJXQKnlKWQcYj6PlEINLzQCAPotGygkRoAuI5SUobFi263R00ixaLU7vYObOL5e4OqIloFgImbNLuFoDaHG+zVsuFMUQpiaLrMiYJLO36hHbRYH1qB/3eCouTSwzrAau9tQs0XdMgRhKNikg09DQgrXusLlxE6iLo4hK80wKBENsWp196Fju7Ozh71cvQLRfodpYIXaPmPs1wUOVNNE9LMf/WAqGuHgeppMmf1zj53Bmkvke/t6dmd8nqzTkj9z0ufuMb6C9cxMX/dwq5F6eQrOeBQ0oQF/gGy1Nn0e2cwKmXXYXYSSzRxb2L6NcrtM/uShLVdY+mlQrJJ8+cwYnTJ7Hc3cVi96SszyD1nXjIWGktuvbcAt3JJfq9i8jPL0DrPcQL5xH7HgEsXni7HRZndrG85iyak7uIp055Kq1kgiQnjVVmWGhJt2wlP2gTQUOPtFqB1ivki0uk1QoNA6Ft0J7cRXNiB3FnoYGydkCHgi8m2NkZEqDWERP4aBxTVX1/69qzxTeyBSkUBt1f6kASWEAwsBVBBLrhaKrUJZv7DqPlcon77rsP9913377XvOpVr8JHP/rRS3n0Vlqp1x408f7A6g7KjOi6q3rRBHKAkhLwXDHu7CWOCSwxONJh1BqOcT3RfgiZkkrt6qkW1Nyo5zGezmSQWk/JMj64o4HeFnAAMiqpgKTNrm1A0z5NBRtTaGDriqqU+AYeqhFi21wyzA2xPuOZugmPG4nCJW0nVAoVoBIhMxjilcgQ6TIyI3tcjWlPckNpXx2nARcqoGPKDD9HM/fYaoqKFG2HjyoIlCSfFfPU3+JZl8scUskSEDgAIWu7dIe7RsgaTJpEPC27V0BqEEeUmBhxSACJ23joWnTrBYZOzw2jVWwm19St7TVtaI4oGhWraKZ+GOIKzupejk60v0bW09AnNIvWGWOjGVSaxky/kPOovgdHwpIzqGuATlI0NV2Dky85JZrU6V0xZS4XEpejWREsHY+7Hbm2XWlRdf9sotOA0AY0g1SWzf2AYb3jazoECUZOfY/QBPS7O4htI+EWfY+URJjs+0F5QMTOqTNol0vsvPQUmlbO7eK6Q7/uga7xSgpN26JpW+yeOonlyV0sdnbQntgRZxcKbqYPTUCzbhFaQrOMGFYL5C6A9y6KgLt3UZwhuhbUdYhLyZzfnliiPbkDG+Ts69XOiuDCY9O17ujBvYRMpJ0dtLu7Ys5vxWu0PXVSQGq5EGeWEArITLQg5ztSStcrGYPgAOUFRw8AqfpMhfQmbFK27j2zQvn5GBUe42XvD6Fjnbvv4pDAyOqGDiwyIxJLyjmGD74xIIqxMH8N9k0KUGtNPR+I0GZGDlRJ4ToBgHoLyqCnQU0tSVLie+6sEJBj8Yjqezk4zIPlzCsOGmwroZb4yXigwk0woDKwAYpzAPuP44guMDd/VtoYTRgezFzE0EVVAJSc41eAJsqHcrD6zQLoVVck2zsDhIzURCBJFpCekx8QC0CV+TI+5dls6jdRvKFyzsgUS1MqAPbUSNrGUvVWtOxQAZSnrSGCxMUwOAUgZtBgGUxEO2RwOYvTX+IFmD15ZlYbvzgh6PlZPyD2CXHdgBGQEyN2LfqdHk0r3k4hyhoJgSSbNBcQ2qBawtF1wWpmZbD2hR28QpbMBE3XIA3iMZaGhH6tZcOzznkgxFjMyMiSFy4uWqBt0K3X6FYSLNx2DXZPn0S3s8TumZOIXYem68TRxpw/iIq3WNWTkYhERSCxNQnOaJPUiOtOLLw8hHRdLuQs8Ybd7gkMqxWWp09LoLIBa85Yr9c6QAGL3ZNoF0vsvuSMOq00WA49hmHA4vxJTb6cJOVZ02B5YgfdzhKt1otDED3OHJHEEaDH4uQS/d4JpPUaaXeBdOEChq7B+rmI3PfAokPYWSCeWKI7uYPu9Al0Z3a9vlMtNI6FR01XpHyF+4gUA/IpKSFETQPWuLju1El0Z06h2T0h8XExlBGvQMidTcRco7klQ3mk8Qw/R5ist+niM+FwFBWsz6oFLLFhFn4D9YY+Ah1rkPKUOipRJxZtKilYmRv0RhAqqDqfEZftPiXN38dIHLzeD9ezZNxX1XOqGJaVKpj+mGQEzzagXliqDRCz4h774rEN7UB1gMBR44jL0WRAZVH1AVG1gMyElsWRITPcbNipibO4eNdtUOk1apAmQfug2gqyuuCNtTNT/UViCugANbUmDAwgEfqUJG+feh9Kn7RDWfKpRUhMSBtIMktYELUKHRZCCpAeQkOT27MLGQECGFI0wLJZBBcE6lYDcAAjUykyFSy2sYdmz0gJPTP29Nmy/qT0RBejJBxuG/HQZJZs1qkRl3/NKJ/Xknk+d0k1ctP6saFJbaWRpFyEBqjZjg34opybRnVJ7zQQk1X48rWjtxSTb0K3XqLbXapVoHeNbHFiB03XYrGzlIBXdbLBaM+Ve47bW0vfZT3Li4iQpYp10zbujeZfZzMBS3mJ1A/IZ9ZegsbyVA597zuqXUrC2+XuCXdSkSDgjMXptYdpRC2R0nadgxk1VUnObMlYW824sJQMMkOP4eQSw/kL2FsuEHaWGFbi8t+ePik/uyc0r13nHoa2lsrvIuZ5KixAy2DIeQ61DdLJXXE5D0HueeIEwmIB0jLxU77hAq4JDzYRLuhAA9utq2NhbN81d5iBrQIqUk2K973pJh1rkOLJT3bexiMFgCeMyPQOkcbrA1zJj1f5HPj1BaBU0yADBn2w1IH2Ca6zVtjfXuNKGuXlNEKVZZxgHoWVowKo2sgTrcb6Y2ofWdwROVCVAFlCzOQluTM0xpAMoKrMCs4YDaGonLWxanccRN3h0j5vh30VALE+m0XiSoHRBMlYEKygI+eiUWWL/NeAUhKPvhIfFjytjWg+GtHPrHWqzJQLU3pRWPd4c7hJbaow0ngMK7spzDuNFZD6nLGXMy4MgwtJTZAxTW2DNpkZWM6/PDswi5TF6knJg6aW0bIcjIDDGMBG7JSvCPIla5hFpPV+LL9kloDlbG2xAXC5SNop5Rwk6W2bJBaINN6oWXTuem6JUm2/OTOkCvgnaxejtV11l6Haula3Vu3ZP1TQsiD9psvIi1YDakt2lZK4mhC7DiGKV2UwLUbBLnQtrKSOB5JrnJmDia1xdSaySshxiMiLBjx0GAjoYwAPailYdaAY0OzuIu4s5Vyya7X0SlSNZ9L38QT7cGVuRJDeWQBEkmorybl47FoErQ5d0pfZ+JcBp+p+o7mwOfe5Gj+/nqOiPlXfn7Alrm9ipmvjn6pJbVh19qFjDVIhRGVWwZkN+94Xu2cyRgX4mQbYCszJ4arHFQDuGg2MAdA2/sZc1bw8SDE9S2XDKpmGHCXIF1HdnfXruYpVYJb0+6pBCJDx+BkbpKuLUbQxqtcLuamgYQ1+1Ehyy2pgoNTY
xtR0MWQRsWqu8qzKHBCCRMVrwhnlbdr+6YoF/AwwxuCmwkyEGLRMR0pSIsPNOYL5kQhLzSCx1FIcTSTNMF2i9xmExBkJXsNylCU+RKBhYXjioVcYuwgq1u46PqkC+SC1u3IIGvRahJtVGnBhGPD80OO51doTAkf1ztrtWiyaBqeYAZLsDB48qwl/00rKtoBRZa2GVOMNtP/0H0YVFyIDKpA4VkRdKKoBy1iYplJBuWl/zOjyQkGAxe2dSJ0jRMIvDJU21qzjPAqj3OSU1T41xsraVmmWt68+H45do6951F7b5/4ELX4Z2sbn1ozkjacK8x74mRpMOKukQgYDrT2r8wKAw6JDu3sCzYkdNKdPIq17gIBmucDi1C7aU7sSGNy2Os+EGohG3ayJWUpwREnXFbrOg/ShbQ3Vnqj5Ui00jn5P5meqOR3Ieg6hTS6ga0/vXPTDw+lYg1RJ7a+BnTrIJkUnaOo5XbDBuJaZDtiST7rQYWvSyXj/WIvy3SYf2CG7pb03rcNuQvCzj6aNau8lN7NY1gnLsQf1qBubAKZUGskojbRFIH9WmpRK5g1MKhP49QNaBSYbx3pM/FwhBq8XQyEIs8uknm7sruPg6QIsLuyBgIYiOsgfy5z9TKkn08TgZRM6BSkxkbMCEo/mDlRKfIhrfdGOhdGRlv8ruC/zmmHVH7Ll+wjb58085twhw9aZmouHJCbjddLzzRAQU3bHjq6J6FJCZzXDBskYnfoBYT1gWIvZJvVifgqaXobCwYJKra1P1wbVV5jL+uhT1kFRyZZVqq9VSlsnzH4OKedcUFNkcUwar8laIpc1wZX0XIchUDXg2zTCSbdg4AVtB5sJyWp3OVBBqhDbSrCA56qmmI0RF1NAGaGKIVgGmTKksuah/EXaEBCzdIAZQAgSrA8gLjoxydXajoHUZC9PPXPrMSVNBxUgnq4OUkBhXq7JUiUwbDHj1cPqlxWgujSAmmhXo/uL23tt1XDt6gh0vEGKJH+dFCakkh6J2ZPMhkricpXV/s5aS4aqCqCVuWukSfHGehIiKh5gUSr1lgwBdokGCGoNoBjFHm4gldbiWJHWvVa7HLx0hNset1KV1ZjZcwTqGzBUIBAim94jpzK20Qhw88YYnCozl7p5m6u3mO8kb+LIVENFm68tAfa/nXUFkvMR88RsBhnzdVITCYR3NoHQmqlSn5OUcRfzqNCgmlOClqYfPV82eCYFKwWXAPkOkQgzGZJtvZanrXgmIjyA25ynvQRJlszxfUroFaQoZWfEmRld02DRJSxyQtIknB5MG9YY9joQEeJCzjm4jbUav3XuD/5b14AvQhUeavnKPLIU8J35uyjt0ln1d/VEmrxwE8FmG9xb0b+zeQ8HrGpR1aaq0pSKIbK43BfbPvuv2uvTbjA6I/PzoInlon44ld+1eaouHSO/xAQYLWdh2yAuO7cOhBiLSS5KKqliRq5AfZtEogKXYjECRU0BFxyQRxtOf2ij3ZUZf0NAqAWebfCxH3AeTDadNFlKtRn4MDrWILUTo5iBVNoG1NMqA5kEoBJlRIZkCtY1F7i4gUddhK3nq5qeyWwKsiZxUBXf47EMoZgRsiV0BbRcQ0S3s/TS33bzQTOh93t7GFZrpFWP/uKemCP7VBpxGGlDXYNAtRl9U2X3srGDTHOeIF/YxdvOeJeimZxnVHEVIlGrCU1TWHAFHmMeoedIQcpKtJDn9ilh0Q/oB0n2Wbwf2RXYxBlrPdgmjU9ru+xF5UzCTzDJ2tqu/aEinxqf85x06vcR1bNvZM+32LdQXM8ZkgwUpGBKAYsYsQwNCGlypmmOHdKPnCUDyTAk9OsetLcCA2iWazERp8MEk2+BtuGacyr9wzihky0qk9ImDdvGaHT4xgKTP2yzPfu1q27B6NoK4SqAdXM+ahCZPK7WoIyJYxKUXjHSqQls47cCKqn1QGLjtGBl17gA4AmFrfxNNIDcR9jYGLt6IOTcTNbxJkhRrVHV83NEUHihaPvTaqHnaPc51iAlGcHNHGQSiWZTyAwiNaeR5vMKWf26lEFXTDranKLK5DCaYPttE0+edYAsGNQOLKEM0Gzj+n2KQVLVtA3aRafSBMTLaEhlZzEj9ZIen6nkx5tOO4/eK5pfvZHNDjx1ohjvw8qbjyw2awzS5Xry62oygLfNrsdjfgdnT6SmR5KWtVE1uyzBvzmTlMdmqOejnJ0Nmqdx0ANxCgGZgBjl8N+in9ycN+ZqozFyxHD+q1km6vmq+2sZ2HNG4ABKhQEEkvXXkqRIYgBD1KKZBK9vNRpPLmeRdjaak3nYXRr5/G75zNNDjcZhyvRMyp2AUW13GwHU5pO27RMXnGtmu6W1/gjde6Mv+xoedRjVxDngTFu2taX2DCpCljy7xPPwqC/j67aB6sYzQhC+Q5BsHyxD605HVYXc7SNSk3xSYuBQ4grr70wEiw0Niqpx3MrTNgdstBKnKtfomi3ra0IG3mXz6QOPiJnHGqR224hl00DFYjH3sCjvksdO8q7FFBAjYxEkMWQXgKhbuCECBwMpYdVN2GTCJpXLwXMBp9ioFtW1LnGLBqXOGernTXqQHjXB6GJ3Rz2MpFxAGhKaCy3Wixbr85LCBkSS+j7l0SIUImzsmmmELwHIAgJMbCjsVgxjsgQxd9aLuZbSuJbsCeJfINHO7oDhgGmcaZvAVPBd3dIJHDXLB0eZC2Z3JOGcRwUS+5SwNyT0OYGJEActTtg06JpGAWMck1O8mdh/SkcM0UpqKh9nWw9BEoSCGYgEXgMpJ4SeEJg0S71pogHrzFikpJnCWUo6NBHLtkXXRK2LVbwYPYWVblrTTK1Mx1F28hH3+sE3qKWWGqi2Mp992Or0LZtvHdsjnUFcEk4rizcGaMBmZ1Xb2jdh3KWhBnyTfrhWfWhT9HISxA0EgqybGhinz61YttBkedZ/SJ+sf/X9SqMd27c+b7O9+/5dt+eSFpi0Zz8hwe95CXSsQapVTcqSpXoJCKCYtTOQgzC+wAAHQgPRYBplqjKnytrUxOPv2Y85O2isCYIk0KQmIrRaXCxagtRxXjI7JKVGc5opONlP0ESMObca9Cv53kLKVYyOwcBUnPMOuKZUYvMYbDFMqDSgCozcwYPKIeuGVFpJ/QgWTFt9rouyBOPaTTbVerP/Z2XMyYC8MhHavjOFB4Ce/5g5V4rMDSwJc5ssbuyRAnKMiNBcjiRSbaCSxd74zYg9cAkQ9mwg7hAQ3BOOVIOrD92JRKMHCEuQpKlpopeziFoxeNk0aEN0L8qgknXQ88nYlh8P4Kx4G6rh3E6bF47mcOt19uGmdjU6j9qX29Do3lR9RFuutfVV92nD3DVlzjz9rDpwr5jeqK/28Qbg6H/WVapusB8nprFYM+3VqClbLhyFcuj9xk+sNCS9rCgcuu4srERfH4gwZGEwdcM2x7i0f9oeo9Keg2l6gemHk0suEZhqOtYg1YWAlgJ6Tg4IGZpZWykzI2oixaCpfxOKyWl6/jQN/PUJV6na6qNQCA5O0eMeZCFZcUHXL0hcdIUhaYCo/lCUSq0cMxqWOJl
UgZR50ZknwGjP1tJSqAAolANyUYLq3HuWPaE4SXjwbrWw/VSI2cvOpyEVhw+YoqXuy2B3VjClpN60tXxVii6WIoxWOM3AqtwfGMexsXrxadG9HBA1KWeMYiJtLfO2mnEi4DFXNVCVzgpQIZdxAgArDSIH7IyQK4AiuOnUxjsEAacGpcSIuPUHLJrodYg8C0VTA1Tjv03D9vmww/mqzTRlBlQyjmxDjeL3uYVGasem9jSCvFpL3gJIU+Y9Sp1TM+oNCZ82GrdhPGDteQWu27SzKYhx9bp+3KiNk+vGYw2TZeSd+vX0npM2jAF5/KmBUY0jLttR1R4mNfNhNJ6bHSrCxlRR3Phj8l41Akemeux5JERvrrXRirJ+HI6AAI45SEktJXgN2DJJBLVHAURe/TOoH3LHkindJGy9rJiGzJ3WNCgN7KMopZkbBad20WnGZDsTYQzrAUnNfazSUGgaj1wPsSmpUIAxYw9BCqNpXrE0ZAEz80z0rACV9FObIyovQ6pEO06EnPQ1s8S0sJq/DjILaAPTkMAkHoc0SPrePCQBCr3Mf9g2X2FttdRmwDYkcbXuhwFQDcY8iC0GKeWMdU7u3m0ZKUT7C1r9VExmfZJqscgZHDI4BoTYiNYSo1RlJXg9IoBceq0zRJemqk3TzhBU+LCAT3tdC5xSZ0hNyERq9RHhoYklcz2RVBxumohu0aJbdmh3JEt3XDSlHlPFjDf4h6mb2yTlF5wqiYMw/n3o9yYX7md+Ooicm9ecvHppY8PTe38L4vt+7fjfHuqKfNRrDl8Lf6g/xGRNbH68Qd9kf0Zf22jP+NE8+WPDinwEOtYgtbkFlHlvYbzCb81kBHgCUxXzy1cqSYPkHmKq0/OnRSdJHxWkggaXQg/0vSy6mY/UrOPxGa7xTNeaOhOEsSmwBiR5UTELBUEqt4DFRNmCdXdtx91a7rUu2oelOfXzOGVwSMg9RNuAaYvs/LMG3SnzDg6k5JrRkEp8Ue3EAJh2plkjsmpOVWYQu2dJEOszD0vqmzNgUomdRWnIt0rGFdPbZ7f6KQqXNjlQmQZpZ6Aw+Ug0J2uUtc8yB/qYQ13/LUu6alWegboq9WI8ejRxOm82t+5CXv2acpNpL8e8ZaR+gUdX11oabXlMLVOPOSRtgGiNLuMF52bi0XtQ5kbl73J3bVv9vZo1jvf/Bm15k7dwUL+b77nN7+6n4Gx7bnGmIjXfjbd12QcVU7f9yeVZpaXVhNNkcjAe5hH5+9tauf1LWwGK4Almaxvr6JVphxYGYXN6BPq2AClnPJXZYrLWQSiF/kocDcFMN/b1jRlVN/NmZ4F20WKxu0S3s0DTSQp9gMCcpXR4P7gHWkpS8dOZURM8Bxg1GslunuHWbDMBtlHOuprgfvMSJGuIUEDHXpP3Rc/MtB9BV3+IWoE01+auGuAmu8zBR+ph5ZyR1xJr5M9BnZqStAD3WPCXBBPBMx4nBaj1IDnvUp9ECwHsvBkAtBS3XDPkjPWQMLAUbiRINgrLPOGpnLThDkLqW04xbJj79hXl3BRVmCJrW1IvJVfSMLiJctBExVLHSr5a1zbzdalPrEuKgCDON7GY+0IlyNTF6NwcanNl56LGxY6w4Y/GEuqrlYEe+Sa05aID3ptiCY9/j849q9cjsNiQ9jYZ7qXpUwd0sAaoSxvMTdPlN9kUjzm69CZcAu3fwX2fuQW4t15moHsJjT/eIKXVJ/1gnPVcJI8DWzcyRNT30P+ne9wkSVJHiaZr0Cw6dCfGICXeaJK9mlKCn5tYMswYRqnwx1pVlXYFjEyQ+IoheKxDraU4bLDCiyWtNTR0LwMfINU22BPCBkvt40y9nK/4fjczmI5nrx52PTOSaw3k5qtoQZGsufPY5VvJNQZ4IcoMSQjcZ0vuy15w0WI/LH8zVWBYM3g7C+pCFO+5WEBQQgss55/mJKRKkKln2De7G31HvxlccsFpwK55bKbqxxx1Akk/TXcLFTiVp461MteURheVbBN+beICRkGyY5Q5LqmpxhavTWDYimVVAyqdqTr7sV/VG9V3TFtia89IjLf/pg8u0nbN3Lh6YcU6656IHEqj5X4UGl27H0Pd9340vqZ+eD0k+47tfm04pAPVI0bjQNjnYZvPKprYPrT1NptvHghOwFho0Nb6dyo10+UpxpEEK+CYgxSwqU35dmAerbmtqqWbZOwNrjQIKqftdha1aNHtLNGdWKjJT9KSpJ4wrAKwNi8xdQRQplU0n8r0V5n1DBwCQZKLRgMpa1XJsFDb14ggaVEUgOz9sUpO7pFGpKn5K9Qs8RsG1PIsYrj5cmBgZSl/tAmBoOcsAW3T+NIUkKrMcno/Ax7PVM9ZvPRYfQOJ0LjtQADJizrWQAX4OWQXAromom0azxiSc/Zxasi8/Czubbw+6jMOZlIT8Pgo3lzi8yiuKZdzM87IEA9Tq5GpPp6bliFtVznXymVewT7PAl65lCqxKskKDHJ2J8KHyipjPmoP9ofXZqX9GMOY6U7BY8RlKoAxGFaRTu5PKkSR8aIp+Ff3nshVQAFKA/QREWkNt3GfN4BkcleuL5veEtuI9v+82mA1GGwgav2VKVAcpE2MB2MfRXEf5Jm0YV+A2gCX7Y2ayCmj97ZeuN8V9Tj5Zf8HQMqYgWXYbnQgaiYpaXCgAbsi/TeWSBXbZbwa3mKMaDvJnNztLNCdWKDd6dB0ciif9UwlaFJZ53t2ZoFSDsQ9/og9Kp08mwHAA5B7KcanDVFmBpcswdVWNwD0H81Fl8jNlBbrE2IEIKWqva6LgqV5HZqtzcuYJCnu1q97rBJhj4FVP3iW+UY917qoFYpjGDFBG9uE7CY5MZGV4Nxs5kiVkN3ZIApQLThqTSpxNx8sg3cgLJsGiygxUq06ixjj55zFUYJQ3NEns61FQzWvmHksSpusbD0Re8HKYT1g0FpCkgpJgRaMDEJKSYpdUkZkyTgfNS1VCKr7sFSCTkny9qV1j2HdY9jrEULA0PXSwpwRm+hM1cGMgqfzYs0cb6PN1dmf5Z0rZlzanykeSps7ZPTKBCa78iAGfBRyABs/Z0O4uIRbbd5o8tYBAPOt0VEHY5+Gvai0jRMeKbptk6bdIcBTY+nfR73vsQYpoNaexKzDgP/OLO9bQu8YignI3qtkO4gWoX9T0TJClINtj2Ox9P0Ez1ph2SeM3FTB7JJw1sDN8cEpeVLTbMzFmlQBkIGeSZFUz/IUrLIAoZ9XWftYUhqJMFxMmaGJ4lGmWp3ktMsY+oCQkmRbTtnP81I2xwZCVHf0wFL7yc/GXMYubQabs0UlVtUMh+rvmxJLAAd0mvUhBjPxBglB0J9GveGYqEq6ycVkaOMwYXBcz76OMQyo9D3XolyjqRxG1CSaAJCmwEJKyByQSdw1AgGlNAtcW8opS6G9PiGtB6QmIq17MV3aeZRpuDbvgpwC6LnuB5XxDND5Dh5Izj4bdYmEqaYxFdt9UmyAsI21+JaBsjWy7OK1Q/KW74ybsMHXLoVHu2C5VQs5wp3s4aqi0RbU8oip/TSlrWpHYcrTR40fX+376vdBpjre+K
<remainder of base64 PNG data truncated: matplotlib rendering of img_list[0]>",
+       "text/plain": [
+        ""
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import torch\n", + "import torchvision.transforms as transforms\n", + "from PIL import Image\n", + "import matplotlib.pyplot as plt\n", + "\n", + "# print(img_list[0].squeeze().shape)\n", + "plt.imshow(img_list[0].squeeze().permute(1, 2, 0))" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 23, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAakAAAGhCAYAAADbf0s2AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOy9Tah023YW/Iwx51yrqvb7nnNvEswPuajYEMQ/0BhQCAFFQRAEG4IdtaE2koDejkb8IXaCjQ/SULSnDQ3YiZ9gIw0DRgRBiEgQiWBaoiT+JPee991Vtdacc4yvMcaYc9U+59x74pfk3kP2OtTZ7669d9WqteYcP894xjNIVRWvx+vxerwer8fr8U148Df6BF6P1+P1eD1ej9fj045XJ/V6vB6vx+vxenzTHq9O6vV4PV6P1+P1+KY9Xp3U6/F6vB6vx+vxTXu8OqnX4/V4PV6P1+Ob9nh1Uq/H6/F6vB6vxzft8eqkXo/X4/V4PV6Pb9rj1Um9Hq/H6/F6vB7ftMerk3o9Xo/X4/V4Pb5pj1cn9Xq8Hq/H6/F6fNMe3zAn9ff//t/Hb/ktvwWn0wnf+73fi3//7//9N+pUXo/X4/V4PV6Pb9LjG+Kk/tk/+2f48pe/jL/9t/82/sN/+A/4Pb/n9+CP/bE/hv/5P//nN+J0Xo/X4/V4PV6Pb9KDvhECs9/7vd+L7/me78Hf+3t/DwAgIvjSl76EH/qhH8Jf+2t/7ev+vYjgf/yP/4G3b9+CiH6tT/f1eD1ej9fj9fhVPlQV7969w3d913eB+dPzpfzreE4AgH3f8TM/8zP44R/+4fEcM+OP/JE/gn/37/7dJ/7Ntm3Ytm18/9//+3/H7/gdv+PX/Fxfj9fj9Xg9Xo9f2+O//bf/hu/+7u/+1J//ujup//2//zd67/j2b//2h+e//du/HT/3cz/3iX/zoz/6o/iRH/mRjz3/5//QX8RSTgAziAg5JeSUUBIjMYEjyVKFigIKEIDMwJIYS864lIzMjJIYBIWqorcG7YLWGmoX9C5oIhBVCAAFLIMj+0pE4MQgJpSloCwFy3nB5e0Fy2nF0xffoJwXrE9nlPMCTgwuCcRsf5fstfxUoX6+keRyYjATOMffJHAi0DH6UECaQETQ7jtk7+j3irZV9NrRth3aOvreUbcdbW/Y3t/Qth31vqFtDdI6pFaINPTWIH0HRADtIP+snDKIGSln5HVBPi24fMuHKOczLl/4APlUwDlDRCC1o912tPsNfa/o9xu0V2jd7Z7Yh4NQPBhCDBCDcgKnhJQYkSxrU/TWUW8beq2ot91et1a02tBF0Lqgi6IBqCCAGWBGKgkpJZyWFaUknNeCsmTknFByts/GFDfA14xAu0Bqg/SOeq/otaHXCundFwLsfuSE5emMvBScv/gWpw+esL494/ItHyCvGeWygpPdv1g3IICYQExAYru3zON3IIreO9run3Wr2J/vkFah+w7UHegNqBuIAGZCOp1BJYNOZygxRAltq5Da0fcKbQ3a7V73Lthrw/1Wse8V9+cNqgpmwuXtE9anFV/8zm/D6e0Fl295i+XphLwUpKWMde9LD1BARSC9Q0Ts2qntFru2CXRYw5QYfiHs5tL438Px8hn9xN/6bD/Xw/8/ftDX/OnDD/UTnvu6Z/YJL6QKVQDq16tW9PuGdrti/6WvoF83bF/5Kvatou47nq8bau14vm5otaFVAZRBYDBncClIJeH8wRssTysuX3yLyxffYL2ccPrwCSklpESQfYfUHdsvfQX7u/e4/9Iv4/pL/wf1+Yb9q1+FdgUpwMsZ4ALJJ1QB9gbstwbpgl4FImanxG2WfSQdl0H80UAQAsBms1Ixe721HT/y//4/ePv27de8Wr/uTur/5vjhH/5hfPnLXx7ff/TRR/jSl76Ep/WM9eCkUkpIzO6sCEwEOhgdUgUDyMzmpBJjdSeVE4PUnRQ1CAsaNWQW9CSo3ZxUhxtXTMcC37BEBEYGKYM6gxqBGsAV4AzwruCk4AwkEDgBJAApDaNlDgqQPhc9C4ETIXMCMyOlAnYjzon9b8kdsaCfGqQJ+tbQ3fm0rZph2hv224a2Vdy4oN137HlDyzt67W6Amz1ahWoHVMDDSbF/TchrQVoWXMqKUhacy4KcFnDKUBIIOlonNFF0MFpr5uipQaRD3enTsFUEgCBsXwmElLPfUwYyoF3RU4G0jj1v6Ls51F4bRC2Y6CAIgG6WG5QSUslIOeN8WpGXjNNpwbIU5GzrJQIZqRag9G03p60NFfCN2AAlqDIggEJBILAmJE3IyihIWMgeKxec84JcFizLaQQyw9apuqF3I57YzrNkkDtnFXPM7WIOsr3dzdG0BrQK9A603a4cAbwuoJxBywoFQxRotXmAUqGtQ7vd594F29aw3TZsW8X9fId2AangdFmxnha8WRaclgWX5YRlPSOvC9J5MUfDDIKZ3OHQu72PNHsPu0qK5Gs1L6s5qhxrl30BxP1/NPa/WmC+fsK/xjPjLen/h5P6vzibcFIiUOno247OCZ0I+9bQuCB3RVl27FuF0B1pa6g9gVBB6CBhW4OczFFRQlJGEntkTSiccS4LUk7IidG7oLcObQLZK9J9Q7reIbcb0rYDSoASEgsoCYTU3kcJRBlCip46hAAlRVdLAGw923UE2bXsBAgRlACUDE6MtGQwMUo1dOzrlWx+3Z3Ut33btyGlhF/8xV98eP4Xf/EX8R3f8R2f+DfrumJd1489XzihMEPNgoKZzTE9OI250BkAE1CYkMgedovx4MwwynQ0/dB41/kvHWtNx/5SEs+0GupWQQTst91uIGwzc05uiAiULJsCWURtkYlCWh/vw55xSReknKCiSKVAswIlezTOdgLuSKCAFjO+xHZy4hlbah0qglQSpLmxYAYnhXQGaQKpgpKAlDyTskyDmcymJHLn7BdCBNo6hJtdEwGkizndQ2Yi0j1L62aAGRBSC
9qJzXHwORp0AoTqNjbyUZUvmRTMMhJxEZw8HBHJh6FzYVT5+aOCDei/2KwRbD0tmfTnqt7BbBqV9GXUnfPpN4zJ7U97UaS8LHOJW5+RJqqAFlGsEWN5egGVXhUY0SKngVRhLMaE2rNSwhZG8RzXo5zsG5IFbcazoowpiZV2hg4S2wz9MmumcG1djO8mI14LUV+be9pRbZmRBZbLhtCQdChmH3ifDmsmbmZLplOQWkFKptnYwAhxlEErm8QKDbYYatm7OrW7R5WRt9tf/T74e/Fro37sEzQe7mUCIUeK0QSWapnU6kCQSuDesDw/U0noKqACQ6v3ql4L+HEg0VndR9NkorVUQKMRR76FUzB50+RjfYQYEhzWJbA7HOda7M5ThSRMzkV3gzuOSQbu8Og5PgXULZZ1OgBimAQcDFXwTksIwu4r7JlRAUWHJJnZ9OzrVJmMmwxJ2pzqaGIsulSZ/K9BF6N3hrtH0yobEgDK0DVzwcR5izQ4SQC9qzE31dxIkd150iA1+YEMqwcIHPiBDDInT6AAWNpdlni0tmaErlc2B7/cjokv9TefbwMGZQZNTWrPfv0cGhKN7ofRitAZftgimsd6Pc7RIbLwDGYCsbd5kP1lwPjOM3OqTmOLeqK8HUk60uMVpkIoOutuchAtfr5VtGedpNqKmyZ1BAc9xPncaIfJ/qLT/E9h0nDqWZQG5EFU8wb/9qPT7WTWoP+HPIpmlBBzmMqbgAJIO9fIPLUWcUFJdlYSW4ogqYe8FRCOZ4Kl4g2arF+J68pZXGWl5MqtWJzaM8c1QbODCqG/61U36J22yCqgva0pbK3zol5MgaTU+0n1Kno5hfcMdBcZJB0Hgs2CsNT08j7Zg+YjtyRuAOJehRijfxaAzLKuVMi67XSoC99MiS04pkV+fO7wVRRGx1wdvBxoryeFikWwnbcwMyQ0DGDG+SAKRy6YJSEyKpHsCJBPzedRyKrZ45uyvdUGCTr/gWjKS46csQrlJQ/pUdHlZF5LvjqPbn+bcBNwZwKvx4ssWv/VdSmrssYDmjddwBqc8EUMEVuz7I59j9HY7R1Vvl2uPR2rQ8j4qz3F5kA0apVXtlZRpu+TpGNKjGDSTDFM2lZwRORZcWFhxFrCGAuSVS5LpYqspEcnvmDCIV1yQYVNmftay8sYAGEZqq2x6RqitfIWtiCCpnVa1+egEHB0fsU9+q6dojgzckElzXLWsyK7dY9i+zdnWaK8zrTktypXY4MmKO1Y5EX7M/M4aEDnQ47V2DMu7GDZ+9mM5yBZ4GecxxzHRzi87p728w51afNJdsqNp9U0N7c0kmNyKT2hnav6PeKk9ler3ZTtHfSly2afS76W+Sk/uE//If40R/90Yfvfdd3fRf+23/7bwCA+/2Ov/23/zZ+5md+Bsdx4E/9qT+Ff/bP/hm++Zu/+f/6tcaYmDyhxTYSaYJHiKFm1OoysA7hzDJRCmUtaRCMkiwEnoLi1FoQkjrbqpElrKAIx50ts9q8wF9aQd2a9+l4JFJt1ktpBbXZjaZC2f9DD4dRUKiAC0HVng8g1N3o6e2pmXF9PZ1eOjDuVhCdxwlhgnRKRfiEaBzaUcDeE5BRekFBU04YSy89Eqn1xlHjWE7K+q001TiimJ2HT1ftK14PZNpfSyIKbsjtns0pUAz0t4fX96zwPo4OrhVzWF1gY4JWRaGaSh6RUQFAacUPMGE6DFhbwXjuoEI4Xx2qKARujCHDIEA1pqhMIASJRW1OkuiSsplqQrCFDLY0tqLBWleKPrAgvniXxrDOCCL/n5JUMCdRvb8GvIR+wxmmpFFE6hdYcjBAIphwrcZSwWIqLMzF2iZcf84ysJkQaQmoDerwpUkSRfZtTsob0snPj676zLU2E+0apGLkBfHeMVGQU5aVhkFpQOr6sRK0cmbEwSKMtYqljXpJZUo0ooSMlwKzOFEIBO2UgwfFA1Txeis5EYBLccMNJ0FJZrQTijoJKk6AIIOYgwwRLSjCrhhCpuFX39kPBKCVYnumcDa1Rr2LLme1wsgeKCYpbA3U5qS2rWC7mRO5Pe/gyiBHDlQU58thR3Ao5mFjQ/rLYZnlmBeha3dORDnR2fo0TUd0e3ND2Rra+zfU3ZUt3nck6PnmEGBJskw/O/r9RD87zg/vkHM4zOgsQ3eS0dso9/L1TDyAb1Am9Xt/7+/Fl7/85fUidb3MD//wD+Pf/bt/h3/9r/81Pv/5z+Nv/s2/iT/7Z/8s/uN//I//168TUfwiCywmULKnPKoGBRxHQFGoFhSxQWeqilKrsZ+Ism5w7e9ongYzkx8oy2REvBjJVi8JWZLSWjqpejPHVZo1meYgQ7oQACL8ZwAw2SQQoT15LYYtyprbABc2ZuNpQ8SkDLiAIaAC8RpARlmE3Py5dggYSVeYhncjwtjAnP9OZQa48Y3oUp3GrhfiAQKdIU/3LXYrugx3/o0ARDa3Cmd3AV7G+XIHyKm2odTRmx0wJ0VQWpfISjT7b0w+qUDF/mZ73g16OW2KqKqivZ4AgOmD6sygiRMVjAElfn+i3SQoxUEAKbyaFDPDIMrbCra9KLTqORZUWlQ5L4vKRAkfhdtIGJEpMwqODBTLSYGMAl21+qgZBokZAyZyI1lywCMAVBGIFmzVmJhTfETGxzgp9jMR5ymCmxxIeKkb5Qbwz9ds0hxYqIA4TZoEUgcQbL9iC6eXNQMt2Z0rahHN9RGNKXuu6WxYmuwkG2P8+tJlpuO7HuES4/Zdr5nJyAaDCVUtKAEAdV2/JE2QZTvlck3kmUvz3kojDSligGFk65HhFr9WKpfsmqwJvXmdetsr6l4Tjk/2bh8mbXWKB7Pizb621uQXdKWF5xryquvaqPdiyjiO/tTWrJYeOqPVCERVFaUaurT1hlZszt/VSc1zeNuQ1ffmhk/0+IY4qVorvuVbvuUj3//1X/91/MRP/AR++qd/Gn/sj/0xAMBP/uRP4vf8nt+Dn/3Zn8Uf+AN/4GOf7zgOHMcakPWVr3wFwGJMkcbQNTWRSpe8X3JHCwKwzRIRlxorDIA0yRudGUdZN656ZB71HJ0TMjqkG2YN2O83VzNvtw3FMd2yuYMrJdPq66GFXUqyuQCAWwWKYPPrKM1S69kn6taM/XYOMBNm7UthWhRUOiBedL+0hr9rQNaa+HiJi3GNLArupEzvjTNbB9NifUVkD2S0fH2w11dsQB6tQqqGBphAxMEHiecyg1e2CpmC7dngvrI1zNtm96qKZUwJU+abgvr9WEbGIm6ZNtZexUd5E3mNiiDD1w9IeAiqllmropRAKmxdUpssiCBwWAxIh+6xBZgooTR1R2qqHvJAyDA41hvIw+HTgp0TxmZe8BdFwAZguk4eEWhO8CwoTmKILKB6xG/3hpJaLtIw2QxdDOPcYiREEjlsX4i/H6Vo+L1AX7GPfN2vxfI0ihmbKeAOSmlCui8YAawFVNe+jF2V+/XqoCKjhhl85sseEDtzXHgFrnDkM7IZ3zXrld6RziJ3UkxoXhOYJTIr8kb49bvmoFZ2H/cvsm5TPIc1YHtQZ4EjZwAUzcPqd
SmQMVujv7K5HFj2h50WQc2jQ3yQoXrjr47VGE1eu8sAMrKpLGWYcyr14qhaNTbyVi1Q9IzLJjXYe9ykYe5GQ99vu7F1z5HMwhjpM7tl0Pp6DZu/+uMb4qT++3//7/i2b/s23G43fO/3fi9+7Md+DN/+7d+O//Jf/gt67/gTf+JP5O/+7t/9u/Ht3/7t+E//6T99VSf1Yz/2Yx+BEO3hW0pX81tkQ5M6RAwuI1FwUe9s98PCViRsQf8mZKNrTMhl7+2g4odaYfRXtZS1vxgsZU6kWpT7fLOsJViGhVZNB0D2+uDjbxARDIYKR7HZOF/y2pqMma9XzgFSwqwFcKUBFQWfJTFqa7SzxtZ46OWzXjOpSP/9eoMlybUZvOJOVlXBY42HH1CMQSmXQ+R9E1iOUdUEaQmLYSYwBWa7FoczQCiDUaeNoaZqo0q2D0yNIwIAgFCqMcQi672uKa1TaMap2WFqT5sV3H29SmFI7x6s2DVE3S/EaWO9GgW7bpm1MOaLvfSYLcSvRx0gZblAPnPM5grJXNT0NLy+5sauqk4AKSu75aUEEmtdnVlXzmGqIj6tGBHMuVGNPMoAsWKRP6yXS8aaJRQQZlwTkQ3djDJYKsR7hglaKhwVVvesxYK8Sp51kkPAuU6GTMhUSAdizIiqtQlEXflSKLSsyOFRiloorSBPRUJhKhXUdZrAqeagQdjfEiU8TvAzHqSGK4sWFlxFvyIKMNnGfZh469oalNvEa23umIsHCKYQ472KOt2xuq2pjLrtNl+u1VwsdYJYbSWdRClWux5nh/QT436iv321SdXn9PFFBrTYWnPi/nQ594nshKMX69ckdzTke439PEZQx4EOuRp+OLfWTChY5lrHcYxUyhBRlLefzP38pjup7/me78G/+Bf/At/1Xd+F//W//hd+9Ed/FH/oD/0h/PzP/zx+9Vd/Fdu24Qtf+MLD33zzN38zfvVXf/WrPueP/MiP4Etf+lJ+/ZWvfAVf/OIXAVxqG/BoUnzTTtuIk9kFOHGxIKsQzK5GHM5JlBMqKm2RGxi28U1ex9Lm2SeIjd45zw5p1R3Fhe0WqQe5LpxYV3js4itxG5d/JxFAGVq856TZ5ihRgARMNVrEU+8LnFjcQU7P/OTxJfTh1ZBiuLlpL07LlB0qimP/qgCKjR0XAtqw8vJ0ii5UVr3k8joWaXsGBoc4NLIAScPedPW+jLODS0E/Otph00Vnn+AaahTmNA3GibULmxXpgh9CNey9VPFosJko7W0zerGzJomAKGBr0mRtPTLW9jcWjKXI1sJR+V/k56tz8iQeSkCg8uJZqhJyQmxoENp9Xawr258lKcAI2AkAD7HMiQg0LMOYg6MYluokvNCzNMAqxbOCleWUq5PyzC3zFV3U+rj2yDB92R/ldfjatHxxOJG1OlzFgAVjCEgZ6eHjNSiwUPEbQYgddSGSeBMuBavV9oAQrRE28fpxPRbxLgjSSSB50/x+MgzSM4LJosNf660pYRWBcSI57phVEaNV3J9ZO0ItSboqe7MArJA5KQK4RgBtoZEOp8X3YRN4fR8bUcIPaezQCLZzj14C6Hfuh0wBsViw2AeoEMZRHfWxthue5kjNRhbPagmotrNLU+g0WL64IO4cFSKCbtbi6z5+053U93//9+e/v/u7vxvf8z3fg+/4ju/Av/pX/wpPT0+/oefc9x37vn/k+7EJBL6Rp2TNoIgmrMOFIbXaCOjICDx6qKgmCBl0TNVUGK43qy9x9R6GMdFfD/RXH3p4HJBp45ZLYWBObE8NUEHxnhCdBTrLowQMkzFjnIIN4KslViujKQ4BksMKRKaCPCxEmmeH9AbIxDwrQGb4FQBGREfiAwqxnBytNbRvB3RGIT0INAbv1Yqnbcussx4n2tFBxOi9gwnogw1G6zP7tAAk7VnI9d38sE6ZTl+22h4B2EtBG8W6112Pb/vwZod2q9je3AAAda+onkUp+wH8ahlqwE4ZgJSUUppjmKIFm7ZaPzpGOzICTGfDKwqVMGR9plEjIA3fxzmqWHYFLCoFUNxpV/joDI+USynYb801Hytas2y2tHqZQWbZIZWSnnB6b027G7lmnAPjPL0BVLJ+Sx48CZw+L2K1QicSQZA1ljCuUbsx+ahVi5pqwqMBPUY9ZSPLxJrDXAFhZS0k1sQRCihhYhjhQ9XWvkwv6vtm9OtQN4bWL+VOT1fIp77n2A2ulbfIM1g2yru/hngdMrU7p9UmdfoYdGevXVVCGP4+Fbk2cY4mjJb+GBDS+oLinAlIJxTT4MjWUJ8atqcbnj73Ptq2YXtzu8yWi7rf6sk8X+/o54njwxfcP3yL/npHf7lDhpoQMSzLIapr/XKNVluDV8ccejWhZiEjSE1VlKOj3CvGMUyK7NY8izKGM6dCjO3Ztm8LCWhua/aWclEqCm3XMPmrP77hFPQvfOEL+F2/63fhF3/xF/F93/d9OM8Tv/Zrv/aQTf3v//2/P7aG9fUeoSkW02ehmnNrFNGPQimDBPYpqczLoMWh8YMRjiRomKWFnIsZoVl4wQNzAioYpBgvFYUU4+0NrIrOxrILBkwUcItTz4s0hww+JpKhxywnUvOAeViK0eQU1kgc6hSVcgCgihE1FgyiENcihFqxNuYgWZuKQijyFwJE0MiKvOowU/X+LiJL+WPQoMxpBsMNeoepSoMsYkzD4ZG4+nMC5iQHTKkghCqtpgOUyZjDBXHPbv0Xd4M0Si0WLdZiyhHOvstI0V7Ro0IgKPIGP7gWoqeXAfvq0w6IorSCk1dnf96FcFLqTMQpEBo+u2euqD6yFDx+DoMQGUlkGOpRPtjg0+pTV2/Pux94L1j7nlyHf2VV8Uri8B63inF0My7F6qbSLRAwbE6WY/W8VTn2h9dBIkHJzNpfJQyz5G7JD8ss1j0kd3Kgy/7251Q/s1fEmUkdgqR0+gSAxNO/qOeqQvx7SXfQd/ea34pLVnVtzDb5M8nvm1K/U8DdmM6pF6kor4N7q8hVcBdYtdm45w9b8bITNNyWQ//BRK57Q3vasb/3hNv7z2i3Hdt7T6n4IBrCspEt9WTUHa93nPc7+v3APE8EzGFJtkHwtjX1ciJXgqgqgJjeJnmCU4hycKVcMkqubKIDjtpwOKmjY/OZVurnqLa6al6+BOQB5VXH8Ws9vuFO6sMPP8Qv/dIv4S/8hb+A3//7fz9aa/j3//7f4wd/8AcBAL/wC7+AX/mVX8H3fu/3/l8/dwyjU/jNEHUjNaFqtFeF0axBBJr+2SPJ5RR8AfnS0FaLU8rNKKhHXpQRu0ESCiNRjMpgCPqHGyATRAoZHVRr0jvZ03huRtzgwlBZNzAn+/q1PD48Ewuw3yN2bgwepqNmEkreyKoM1eq8EUrIETQxJ6KAkIdtqIljjtXn6Di4Zz2VUfaG7XlP+nKMsJYxzNiO6Iy3OgU8ar1KASnBol83XFMJ0x1V9nB47aSyDaVkl0rq9xP91RwVt+Kwn1hEFpYOSIMVjxhTYg7KqPtzGD4ONRgC+8q2xmlsTB3iQzSXMQmYL4vBRC7kSRa0BGnkur8eQB+s
wMhrfGDvzPQMKeoN+5ubOalbs9aGq5PyPUoeQMQemd2cVNlcVuroOGuBeBN4Co6OAZ0RSRt7MRllyi6UvN4BXdaWwhFR1Fu8XET59h4ypVg/TRD3nfsT5wqwbFrJHZ26w/TXZ8kzah6FMMmm58Zz5rNy9ACtazCtSg9hXZEFcybxyupEGlbb9AHdxvRwTGLvInsD3VFFr2O8YDr3h+++828KRITAraDd7Hzt7z3h9oX3sN12bO8/Z4A7eof0gfPFmvplSjqo+9sXnC93jOOAHh2ANbgTDD0wO6APezKcpbiD0tA1K/b+BQCLwfrFgzKd0xVcOG88bd5y87Sjnzu22wYoUFsFnjTLBEtUF4lkfZLHb7qT+jt/5+/gB37gB/Ad3/Ed+J//83/iH/yDf4BSCn7oh34In//85/FX/spfwZe+9CX8tt/22/C5z30Of+tv/S187/d+71clTXytxzkmapkosI58az4NuRdr9Ax4xSAWN/Ic0R0t6aApuXjwwu+sFkUXd1LTo1QZ0yidvSN6azpPYBx4LYrxsmHeX8Bbc0NUvABuUFVpDfub2zI6F8gv5+w0S89D18ywaPZu8kjQXUCTI2abIAiIxCMVQqmW4qvChgySp/ZkGy4Yen3a6Oeukq/nRQXbUK2g3Lx3wov3pdn1zzHAxYy1RYiWmRELhGykvIKW7uFWUarV5ngyxrC+poj0izoxAMgCbj8tKzjvB/r9tNc9B0oz9h9782gqwQJLqmpOyGnK6v3DO/px4ni5O1V3AmpOo90qSjOR3/15T7psPIxFZdc0vTbWXw8zGN0KzDGnLCL2UNG+xtTBzuNmeD6VAt6baTQ2mzZcHda0UeFbtjNw8wPvEJC1SZS0fDLMGY/7clLb0+aSUyfExzfM44T26Yomfr8vkCWBE97K9+77olhRwph9cFV5MVR5AAjx4gFAfTYToKhkZIOoS+V98gKdPXc0d9uPzRcFPGz7PP5Nno0kJyicoSKV0wOCe5jjNWRJDrmTKp5JFSxHZaQQGwh4iia0mc4p4WzJGW5LiNmU38PBrp4q08i0epY1OtfdzsTTF97D0+fex/MXPo83v/1zaLcbtjc3h+UU+tbIPHNMnPeO17d3vP3KW5wvL3j58AXjuEPObm0M6ZwuGfG7D9XsBYwZegoy7JZN/JlLQfNzGHqhkwk4Dda0MS1qpImtYXvasN02PL+9o+0NN2/+jT68zMgJuL99/SqW/fHxm+6k/sf/+B/4oR/6Ifyf//N/8E3f9E34g3/wD+Jnf/Zn8U3f9E0AgH/yT/4JmBk/+IM/+NDM+xt5TLVxE0QWYYVRIPKkgS0qF1kU9azLeO6pMt1J6TseXkHdDpeK1aQsevaIXAIWmAAE8wQIgvFaABnmLFqzuhgXcKmgatpxdWsgWG+WNjc2djkIIUcWcXYPJYvLDgEywgGL17rgzlkdyrsYxajDXKjLVjVnrwNYqp8K1jP6qygzoPh7rmZYS6v28ymoU2ymzZiorWDOAnF9Np2mwh0utdaCWtnorM1qClM4HdZkg854aqrOB2ZiihuScNZ1blgSVByiCgwjCt/qQYWJ1zoD6tXV0L2gy1kvLCjVs6ZrDc/X0mAgxSwGeQBW85zsPXZTk2VphARdWUPEIg7flK2aCHErKLcts6jq7Qv7G5szZu0MJZvEF3PU2ZdlZd1aJQOugH6JLPLmYkxQ6dMCOR5peKDiauIElYUWrLdOmanGRNYCQXWYddjxM4PlRm+oZo8TgwBGNigzkMhoako63GZHeTmZa5b8kYdg7feEdpH1m6WmcZmIPG00vGn6rT6lyEf58tQTSD3P+NDYX/Fi7kZIzfGQr1fYk+uEWhMDsLYGJkZhtdrqblJD7fmG9rzbx21D2bcVRMOCpDkE4xzoh0Pgpwkxz2H1XVZc2gAeclr4stuxijOD5cjl+vuiUBJUUZOT0mjGdtRlGgza1Vo56BwYY6CfHQRg7Bswzd4F6y8nmBNwvB74JI/fdCf1Mz/zM1/z57fbDT/+4z+OH//xH///+7XOOdF8NEZqQ1EwiGzTAD5mQxiYw9hyscGAB4MCwCCE4Q2zfSS7L2pQ8zi9DyFUhQdIBoacwMk49MRoFeOtZ1HEECo+tqNie/OMsm3o778xqmnUFdwGUK2W+j8/g9uG9p6CWvOI22iBxB7t6kVWiXCpuyzSQib1XguCkzCIxJo8NcREo6Ap6SyD1cXpWOww1VbtwDiLcbttIBgBAcVIIcKMPgU8RvbO1FZRqxECts0UPEDmeEY3OE+GwVIspkFX3AkjItcZsN10HD2YlOLQaTgnNb22afTc/mIZ2Ouvf4j+euD1g7dJzd5uG9rWUOoNrTXLTJ4XYy5AEiLKQGfcuw2dux1GUDhMyFOmDaiM31tVioiegFKtCbJszZRKNpeecTZXu1nDZHvevfu/Xlh9Kxp9oA2HUfLouO7VVeUnxnO3lgknU8g5cH74inl2nJUxKmHe/X7SXAw7WQEGaDXxMrl+oAp4mvgqtOBUU2foToQZc4Bg53FUGwIIbam64Jeb122e017PevycpRgVKiLAZbouq5r7HrIgOLkEXlG77p4JnbIm3xq5AGiwUR+VKI2iOSWyminIv15w7gqQ3En5JcaQTGuINUZmCLe2yigMtKIuAABszw1tb3j+bZ/D0+ffx9MX3sft829QtgaupjIjojal+95x//CO1w9f8frBC14/fEU/DpxO74YY0GeHJkgT7zIqY51CU4NSHX8SJRtQ1VCNqRZAlFhrNb3Oow+cY+B+9qzPtc3qUPcPXrHtDU/PNty1XILbyDbfvr58Ijv/qdbuW5sxsijbQFN1ddTryqB0UgpfxgaTyCbiQHjkJ8WgwGjojYKqibs6TCRmJHVOb2okzEOho0NHX42PcNiuNkgfKG2DnqfBfTWa/pAMH94aZlfU2wBKQXXna1CgH9LoW/EietCVQ80C8x2QKTKqshr5QuzUNAltupLBdRad11pcTLSsPpnL+huN32pTqgX7sCbb4gqYmwjaEFdYRjq6fW/YdnNSxGtEx7ndDZJ7ZeiwNoIajpIJ0a+Z1283OB3YgyMTgTgVt3vm1O8n+ssrztcT58vdoT74uBOB1mgMJVPnZoLp3S0IJzI6C2Kq17DGpT43UdpI9lWURQw+tX8ke3S3WlNxfbSoWYY0TXvavBm8ZjaLgL0SIdaHqB0gKCuIfHy5718j8ZhSifQBIsU4jFgTxlWGVcwpAgBatP7LNnIFEgJTAY2BWQjDZ5SV4ZAcBF1m6v8Vh6gnm/wQ5/3LZ7a7SWYEoxdHAx0BsmYYwF+kYkE5z6GNIg45OvlBXVPQbYP1bLsMlT+3m/RFtkinKLm+WcemtR8uvw3kLTYNvOYszeYqNIEklEKoUY4sQHu2kT3bmyfLpG67kxFq2rl5JQ85jDtCwWE6XIp1Xyx7q0Cq8X98Jpo2IiBUJoCXzNSDvmiWT2Igo6BPwdmHZacyMXpBrR0kin6vkKMn6YdbNA2bbX457h97Te8+Pt1OKrKEjJ4W20jUTW5E2dMCBFbGDPppOLCIiCi
Ktwwaq3HWvu9OLdSUo8lPTLFYh42znsc0skG/dsAb5IdSIafRneW4L8fiURh7sypvm9WNxgTvO4gLuNYQ3s56E1wNIAvp7qzSoPkBUoda/Bh6UZq8WKwoyijCAAREUfNgtFpds7CkVhqiluCD5MwpMaAVejNNvDKM0ThF0GYoeJBBWbVg2zdsN3NShVePx1kZ4zxxh0LOCekjdf+sRrvEXy9BoeNMzurMmqFnvX1Yfebl7k7qbg7r5Z6OrfjwSGkV2JqRRNjqcNxaTlg2JyXQaa0MMqbVGpqN8T4LW4Nj7QlFRtbDvCJIY3yyd/D76IML3JdKJbctI/HwS1klCogsazPLUdn9ZnARiBRQGVAxpurcrJYHWHARzemqCj66naHOCIXb5aPCmVEaLZPjAXhOtMGYYo275Fn98AAQsAyFUEw3M7YQPVKi4aOH1c+veuAZe3a5p2D+AaF7F+ffGHkLCp4yvdF/OSlPOFJpg4m8HhPA3QosMgtBlqoRsuSUxp8u98MIu6braQjEdtuMALNVby8gOMhiTurp4qSebgb3uhq8+hBUcSc1Lg5qnq6J5yQoQgGRmpNiBvm4DpsyEFe3jk08HqDJUMVx21S8mTwCbiONGLt1iBGbTm8c72NgFFMnoTnRa4XcT9S6yD6rnEJ4PT8DTurdaPoK2YUKt2HsJj2iAxCyxlMEFHbpZ0jLJ9YrgGnd9RkxqVoNa3RgTmPxqdWkLPxTaDdnpdGnptGDZI5qvh5AYRytJnQRvSjWxLej7jtECO29AWo7AGf21QoLjpzV5bRVFUF7vhl7S30mVRkAdRtEKIB4iKiqWVejs4PHBA9zbhGJkkdP276jbQ2Vi9UURCHnAIp65C3ZhwaP9qs0iAj2GXI/yEykeENw2ypadR1EiMundByk6AeBe8dgYGACApejib5Vfai/qczMbFXE4ViDZPurZ2ZOthjHwPGVF8PwXw4LMqCYOtH7QFPBgIDG7sPdNlRuoFaSVaZC0GJ7SzwLrlvBPC1KnnNiHDX3Vxj0qA2tGiEvKKgyauOsIRXPiBPqDMDRUgxzrtNVGdzpoRTL1q9wILkT8QK61IKyTchePavass4VBBQUew/OW03iSNC8lZAZeykFJIQyC6YMKARjMlpjTDDKJO+LscxKlaA687xdnc5yvvbjIVEpgZ0TV9ll30/FXTaLmiad15pCeSbEjqPnycRmLyYCTuCA9zzR5etgGnpNzVoPFgXd7Is78uzSNttRisF6W63Y982ncL9xKaOGuhVvalYP9AT1ZnJDu0/wrltL5tscE/3oOF7uON4eOF8Oo5l3y9aZGIUKtDQQ2z6pHHqL4aQu9V2smpSZPPt+MPBoa9Yy49qjkVGR/+GcM+eGjW6TBMaYGNOGHU4fj3KKmKboMXwfhxYjeXaHz5aTygXHu30bHuFETiuSUiURsa2iuz3yn3q5lQ9Oyo2EGzhcsrf8e8USvQ1IAqueosRA7wiMnRmw0czVSyyEej9B1TT6ajcdLIv8vR8o6gRlSetn/aJVL9OIT8slKAV9mpylKDkoLuALiz6RRjTESAlkfniIM/DsoMZQOPX+IHO45JkfJxASEj61LUXy4s4Zc/osJ8YoBPEoUxmQi4EmWs4pOZtXZQCvGY7Xwz6OA+fbF6sX3Y8cxjbigPduTEkoxMkoszJkqxAm6Bg2uMcj+TyltpHyepgBLeZFtXn/nQQoollLMpYlZYgeETqzOtU6qNe+t5g8ujBjqL6nNKR6pk+TIsp1ViI3BL7HHaOyc0Dginz9Mu1vSx+ofTO9vt0CDL5X0BAbsXAxbLnRmVPChz3FKZVRBhmUVQlVrf4i06bimo1Ue38Bz6dYbGQwmudnjX/RVZtK9qG/Vw2ijEaMmGryMUk468+abjBAbT/a1yw9HNUijrDfd3EwIqcxw8+OCiBLiYOLDwysa4J3CMK2W0NtPkiQQn9ltbzwBVaPgHJ6XdHGbYwkb0Eiu7QmaeVy0SRcOosxfPMqgfQu9JelhFA0cTvC7iyJGZgBoxqsushjbgN1bQ+Km2HAICTe77heAyD9t0hx4rfiEdFQ4NwEJ1IANjzMmnOgji8Dq/D68NDLZyLA9dRwmT8V6ucQ+9B0UkHYiC3gfVWXGpg1zZpBHBp9HJSbs24b2gDmAGh/hSijvXm1WS7O8iLgQqCwDa7e4S1PO0gFcj8SSjOqMAOlAbDx1dJNBLI6fdqkhgZiVHVsolqrCeqKQhw2M6jRIiGZrmPoArvwjJC9LhWMuZI9Ps0p03CodEKOgUkCwUTBhGCCaYL962XUJQ+2ihFWZHTISdbj5dnY+cFbnB++xfnyivODDzFPH0U/JuZQDJe1mj0CDWDMDuonunRUEtDskOfdINV9s9GlHnCYCkVIzpixMCV5QSmxByO48UNfyEkivjnSeFpjrU5Ahqsn1AqdLesRkUFnkNRP6JjmRB3mw7Z50yW8jsZhXe3B5FCxZ69RU2olUgpQK+h9ArWg94GirvyexBR4S4LDfU6HZyGITNTOaFvBPhnPUh3O8lrKGGgCVAgsx6IHGNdDQDsfMIbZmE5uYHbhXAF55sDMqO5QSgSc6sQIWX1/cX+g0aiLJH3EF0GHr143q0zJ8rN95yLWvNoPAvI2HbyQakJmJMWhve1m2dP+tGbJGVEKDrVYZl5byZqmnS0LKMX17s77ieP1QH850e8d2gV2NAi1VCjIJwxc7dD6fA22H+2e5h6yOVIV2203e3PbwJuNOZqidn5Oy6CiwT7KHgURcDEqXCsRhGLJotvRhKtW4NM/A0MP1b260kKTo9udLYhcMYMNCXJqZiyU/XShyctAJ8YcNRiHD6+jQGzAmrtIZvvkg8gYaq8ZRlytljGzoAsfpUBwLMOgpAmjMZ8T8+hWT3k9MLaG+Xyz525lQUeeynPUp5xFhFZAaDatmAt433N0wwwn5fWJwLmtNjSzTsdszYA6TWbf7Gv0ny02oERUzz7nhxmbVKvpFKCQGa3SVgahaoV2U5I/Iedhn/sJjGFzjmg1b5ZqGZb5x0VYkW4K8IbTn7h/+ILDHdXxlQ/MuR6nD8hcNlecS0wwSRzWiUGCsVdwIYzjNKbl2W1/sEkOzTF8gNxwI4hMsAL+1BFZDiBikbPUknsuIai5HJ0l1R7F7jfw1lAV4FqtBuaMTh0+7DIzKU4IUKcAJSjzfgBybyP3tsGP9v2yC+pUKAjbGxtZ0s9+UWvH2g+uF8fONrSa1gRNRW0Foow5KwSCOglcgNEJowPUhweOM5tmWWENp8Qp7mpBOBmrjmxkybSFtH6myBJKNITT+jvDKzA8kxKP6Mnr0sVhvBK9iBS9lRd9QaxAF25aKV6A4DR9XZ4VQA6YI29wb0aSKEH2gcAEBvx+g7IeEHXdbClwUkHU1kZIWx3dAqyxap02naGYFuG6yw80eERWGJOdaWWRjxk9p9q5sU4bUCuUYK+rw3rGxvRzML0fy+qN6oFr8cCh8dJrDELbtZUYWFT/r/f4VDspcWfhWAiSqRSpZ24khwVSxeGKygJwqMErvvYdWj8PHDvw8mATLZ
jFnFRMCLVAVg2qEY8WJkHJDYvDE34E3OkRTCzVEYQxIefAvB+Y9x3zdhpbjc3IGQMHea1x4KKB11QEPGUvFeX5ZlVaLumkSmWMY6BsJjM0hzV36rj0B3nGRH1axpKsq+gXE4j4gWODfWphVNogqKb7B0EhoF4koESDcNIhvUPOE3Ke0N6BOUAyweTGudh74hKHHu70pwnZKjAOm+J7vn3F8eELzg/f4v7BW4hLyNg6W0CgEVi4AoGIKVkP8ppWZczewf6hUCgThjcEj/OA9O5wahTWyZ9uOSmFAtPZga52AvjcKpFsJlZXqTCJmQ0yFWVMEBeHMr1OQjCCyFxZoPpIc0oY2l6XcJEO8cNgqEA4KgZXoGiD7LYT29MOVaAdp10jYqqxZSUlCuAO25ZWoQJQUZTOqFqwSQGook4CsQ1h7KQQHcBUsI4Ud03ShweXHmtClVyJhDEImJh5NjUCMcAyIeaECA2IXZBfQIbFX6/CCEOhXB8OKpxUJLrhRB2kB2A1Kw07kyaElqEJZ+eM3Vpp3bNwUjJMmkzte2FjEm7LDHixSI3VNzBOcwzT+7osY2JosYA1+s6Wc7pm9LQc6sNjBbpBwAqVndoatBZ7/9PqeVNsoGWUC6ACVmtMtri/pLNvvp58san6zjWUi439Wo9PuZOyTTS9oMwePTI5lOYRR/gk9YOdGmxx7yK6uPguxRIv8FjtggYSLMcv9n0ClIrRwms1Y80EgtHUZZyYY0BpoCgAFlQlKBWACkrdwFxRazNar5IpJBBwfvAWJigwUbcKGbeU8I/5SOobF4FDN+vRKhtQn3YTrnzzxmnwNYkT/f5kRvfe0d9av8/x9o7hEjrSLeIHjIEnOgEfFDi9HmWfHXpiOB5P0GND2ytoPnmdZa6mYiJzTL1D7gfm/cB4ecF8fYWcHTSc1VcYdd/AzSaB1n1Hve02s8vFg6UPaNLMT5yvd5wvd9xf7jhe7rbufYBiAGC9sB9dfieMx+iC3k/Qweivd8skilGwFIr+aj0p/fXVFBtcad8gqJIRo/hkZMU1Si4IBZRQ0Y9MKgY32n3dUJ8Hyr5DwCj7hgayzJg9c2dGHN0QXhXAxnP3gaQdh/H0uqNF6VfDas3iFeYgZU7wVqEMlK2hv56oT3s2RcfYmnbbDE6uDJ3maIkV9Wyoe8HWO+ac2M9iPTyvilPuEEygW+8UvG8x+nksSPNgDQZRCwrUp9I6lgkDlxSuGLsQEfJaCQJOd7kfb26ny0muEcwRck0f/Hn844rEZNUMjz+IbKRStl9wMTskYuzS42VgnIRyLx5ouTPbKuq2pWBwqTXVQwKS7sfAOKxxdzpUDViJwLJCqyWbfBeWJwjDdrn6uP6I3YMgxc0g+bptBvfdNtTb5j1TAtHTsrpuZIkIrkitSbvxIpwEA7dSrOkKyh94ALC+tE/y+FQ7KQBJPbUg06JH8Qwj86Zram5/tP5JtLgTLsIIMuMlV8TEEzaNaC+2szs49dpPQnjBV2WCU6zMSDK7Pp3zT7mh1GZkA2fjALCIOWjU94beKsZxmKJ7ki6wVBWchmqGuPqIAjIjvzXU22aGstYUWCU2BQLb8ITZuo0sKSsSW9IvTvf3HhQJ6CuclHrWIwQIMIpF86MZYcJ6wOrFSY1siDZavzkTHcMcWgx/S4zf6dpby96fYGEq8KBEYQVdL76HAIdnO3xxGv6GvOlbsw4whx1GnB107wAPq3m83tHvd/SXV8zjSEHdQoxCi3qtEnOC1J0DZWM3QIkAmMHxyJgJVEwol1o3xzOGaQjOac4yawycWfQ1wLK9AFgN1Z0ZkzdvUzLkVhE9XpfBWlC3ZplUH4u1yeSabYKYTF13uwcmF+bXJ8PHwwBcC+a0XixSAUa3vsOpkJgqpqF24o5TPdONjiWqNpARBPZAEKRGNGFk/06JUTuKzMjebcJ6COAJq17jRjW+5z9eyZLGF5aJOe7x8Fy5hrQyp8ieRNQkhs4JmQQZ5L2BBN2a7ZeYWn6tHSlS7WQGvHYhKhjMyU6SUyg5LC/hEOIN0OVSKa8/SB6LTXohXrVqTeW1eGYaWVDU+BZmHqWV4s8Xs8pyLS7LnhZE37kfn+DxqXZSETjM6bpTpAaHwaZfquN9HishseRpqbayw2tkNyMpEA4bRoE3NqZv14uDiuZWgonERKRHOQIBmFCuaTyoVLAqGhWgNFBt4GLK4uxSmQyn/qqg+8FUNZq5ioL3DU0UOk0D35h/fv2lot52GCGD0Z5uFiU9P1kW4cZZRdGcLTSPjnNvmEcHlWLaeNvhCgoOq4lBe3MoMAIzn1af8foIEzCmQhnoGNDRUWz+LOboUJney8WZAY2jmw7e0TFPizwxp0FRxcZU1NuG/b1nlH1D2S0zRMgWiV2LycKMReYkBsgHxhVkEFBaS9o1lL09YbjBtK5+sI2tN1TNgwFVnC8fmpN6+4J5v0PHvBzO2Au6akQxxIsIxiQwuFXZoT+n4i5KukLBoPO0YKt3zMIo04gRxPCRJMugKTzIGuYAZI5lBOJ5Xa2E6rU5c1HVuXqzJryJ1LOleQ7s98N6dUQSHquuN0lMSf+vW8HsHf0wFe45Onpj1KIo2oE7YYjVN8LA6XAYFjayXkBQsoCt5vU7rEUAWNGqQb/7zfr3aq1ex1KM03shRR4csd+EC7Tl2Uwp7huuxnwZF1MvQUam6aAuf2OZLDJDIidZiSikCzAUdCJlMIvXjPXpZjW2VrMNRv3vaArG6TDf4TWpPjCHOz4Ec8+fVDVhY3lQ3oisMNJEWvuCaGVwW8X2tNsontuOslfwbsEsqYJEfUbf9A+xQbIIkgRWXY+XOIGt47Key4riEvF//cen2klBNVN88rpB9ANREBmuEQTgm8+XSgTTmTReLQLgRUlSFDKKLftrQYM44c4MlD1QZJ4Oc2oaSvZo2mo8xhJShyioNFBp4GI1I3NSlAdC1aKv2Qf6/QQKo78eoFLQ7ieI2KBDONFi2ipwKdCIMl2stDTPpJjNWPp7IT9cUVAerUAVKFtB3Qv6bnI6MpaTGudpTrEAVOgdYVkTG+VL1tXPE3hx2vuYa56XR+fj9cA8TsjRIWc3dW6nGqN4jw7RUtXwWWCqijGi417WIVaDXokruDYQMZRmEkuuKuIxsVQGW/+bQ3Czd4z7gUCLjNklmK8H5IiP0yCP5ClEgVqN9RmOCit4QGlAqaBmU1epuV5g9ByVgIs536MJ7E6vEWFlUIgNG4QQb2Tt0Z+mCQUW730p25qPVjYfoVA8wyQA1WVryAgAs0+0W8uaVNZvnE7P7BnxnChex6vHhnkemL070UXBOqHHKzopMM80dPCWhyQjgUAsYDTr+6rWAiEx2MzVG1preHpzQ60VtW753nHvKH0gZMbmIEyymlRVNSiajdVo7FdOqacHvC/O+pxAgfVhDVoDItMBunN3p2eCD96/ZpEgEiaMOrQQRNjqPWfBPBvmeYJLwTgOULFBg+Pops13euNuX9kUxAJCjYzQM3YRAYlPbACSYJEQcWb17uSc/FK2hu35hrI3mxPle5KG1Tpp2geLC
TlH4dzzN7ddbhHVvr7GBiHPFlJU0Y95zs8Eu0+zaxywzQ5mUzNWy9STNYSA9JAeXNUEMQVrUqwhdAGJSM6loSBLPHj/jJ0TTxdZ+KCoGCV8urERh2aILLIvLjxbSuZsUb+JtH0OBfcBOoxKza1iHgNcekIEoRSez+vzparDfFyrQ23Rg2HPTYAPIvQNXUw0lmI+lU/SlG4NfDLFmG9j2HuzNAJcHO4jBWF6L4wNi5tegwgViJiF5OGvOaizp5K4uNyUeqYUNy2ox8Sc6x0F3IRE3OhZelDAtZkkFTijZ+YlsxRRfFCIBWrNwZOsMRre/+KMLjnNkUo3aBLTxnOHWgHHbpghizSdJcdQFhv+VgEupg/IVCyzvqibU/FAwiOpaEQl16h8JPREL5HNPRIvsouzSKMQX3aba1aHTyRuRi3Woii0sgkOZXpni9ZNMDdvTM7sBGnYCYD6NGpmwuwN3BrmUWxMDazZXWfHeNlAOiG92oh4b9FQV0wXjdZdM6qFjdKvxmm2JuVa0G7VndSOUhvattl7nxZwUgltTsEgAquAVVDURpFELS2gQlOU4QW3pZMSIzlM2Jw1hydN5onzrKXhD447BcqmCRVmd1XYBlULxsbIESpUCsrRwUXdSTmj7zRW3xw2MkSc5Ui+b6+KGHb9ilCMDMjfsqal/Wh2h1J6qWw2xyp6LKNFAVHjcGIOOVOSM7OMu5Ub0q9F0zaGVQ1bPZ21aEK/nwUn5R+h1l3heD/gZAYzQnL5/UirVQ0aGGqimAPh8Re2Krz6J1Y2hXRy154Y8YgQI+RjzGAbCy0O5IrAClUoVYOkqLlRihTYmWIQ6IBvYkJ/uYOIcT69WCG7jzwoqsiR4pY9lVTTttlDdUEbIt6D6I6FgFIJRAWEHfPWMMeedHRJuE+NEt8HzpdXjPM0o9iHpyTTC+kDcpg23ugDs98R0k/BZAqarCmbu/H37CTWAGRac9ys9kUOM0yKkQuWPUXDo4Qhd9V5rhuIJkDDX39lqdYIPM2RIKC55RDmsCCAyckTqss5jWmDt6Ysw+PBTNzD0I6bU1zXkY0Ao2xEmW1HfX5GuW3eB3chdDA7GYIsC389QXdjuEVQpu7koUs2Zw5Bj1aCKT4LjHwMiM37afuGttuomLpV792pabyvTZ1Zh4iDFsnGpQcraqL11nxkecc8dsgYaLtJArVbQyHgfH1F2SrmcVi2dZgSfT9OaxwWBVhARSzp3G0idNl3ULXP25MNA3z+/OdQ2+bqLIo5Fbe3dxz3E/eXA/eXu2nd3e+gOUFjoJHBU7srn9TmKvROeAoYLNZXprNOneCDaBdA1Jevuo7mjAAbYaIkIA50wdmMnpUAcKj79OyxoNxPm8RQGqhsOI+J85w4Xw6c9xPnOdCHQIaAprXdGN0bC8b04I/Y6nQELMV9RxBKayi7Z6m+L1KN3xEGdWciZ8e8n5D7AT070AfYbcG1sBQB78fBpmGfh1jmZHp/Nu33dfRPZOc/1U4q44coBIYECCg/R10qMykgHVc0D05gOSl1diAZ31+YI0B6wFDjtSOmIERmh6wrWTQT1xB8cW87draZ0pLLiWsDLhlb9CKFAThPzPthf6Nih8uJGYaJX7T8Us+P07AQvLVHBeJGWj3yh4o9leGRyYLMmVsipik3hjEYz4p6rkZgiwxPSKekaFvxOCC8BRkxea9X4OljZsZpaZI5gTkneBjcRWMCY0IKrVuRjo9A6nplRfzDnEsEACGTAyV3IguWi91xhW/sFtGCh+MFPVDRCFjycJJTQjn3nDiMFaQau77qavcNZXMntZWEDCPbNhhM7P64Wr1IQEnqjlYT8hSX0BExTTUJFOAcoFJQ+8R2Gxh9AwDMYcV7iBp7kHy2WUTfVxjs4eBdomSy/j44vBh1Kx0F0BEnzSCtVi3DOe6Y94ZRrI5lztgnHHvGVFrxERYb2rPVIct+M327fcfTe08omzkpWwNDAmr0cBU2J1VdPaR3FLEererSU6WxyRTVmrPfMosQwRwVcjosV8wh05zwqVNGfCLyzEq8pDCAq5MCbN9HTQdYNaMpCeeLWgmA6wQVQe9qKhPnSMQgYDJVH0ioCnHFmmvdyRJe/1ytFls9a6pbQ/HR7wHtcbOgLhrANVokAuqPvrwZTMnc7fm4NjRfH4kyYbUGjMioPgs1qaA7AgDIitfFtabY2Vb+w4Tyovwpluek6GR3I6ywLRhitAQFXR0VwkEp9HJHwlYZwVARY63tD+ol9Sd3TsUo6Gysr+vtWiMA7ICbajUwfP7K0SpkdMxjA2+bd/87642QmVN+RLMhkOycGARocMOZdQGEk2sF5movBkk1a0vb8+4Nhj2V4eXs6K+vGHe2KNH7OuZhtSbMaGAkYzLypQ43x0UsU20oI0/0YwCl47yfEDAaF5Mr8pqIImoKDJpiDdHO7tNzeIloqYRMcZizIMkN6r1szGTisrWg7M5y2lYNjE5nu3nGHqcvsm9wsO4KVLzOBYdfqCZRBm0DbTv4dkN5uvm4juL33uuXYlCvyIBOV8pwh5SO3IOHdFRjGjTkTioCMHVNv7I3bE82ObXfT2y3DdqfIc87ZG8AbLZWqV6rcUj0eo4eLZPtc/WBoyrW11W3mmSKum+ot93G2d/vaM839Nc7xv2O88MPMc4D+OBDI4t0I1JQbahPG/b3btiennB7/33UbUN9esL25hn1dsPT5983de1ty6Oyv97Qj4Hj5cDxckc/O84PN8uGjgPoHTTNUZWQLHrerEXj+cmYo605M1ftjN3vmOeJ8bal0gjE913uPdfigwIYNuGWJCWVIEY40D4seJs+lr5PU2MQAt87Zldw3cB1x5imutHvVpMaw+Fsl4kKUMQktawWlJJKEQgyp1hxwHnt5k5/M5V9q4MGwxKGfEzxmVVWYjBS08j5axkhRgxD73inh8CGMkEIB9XFlEG6XK3eV398qp1UqQVbqYnVm5MiVOakQkaGcyVEBu0cgOtRuVpy0q3d4aAYHg1FMPksrUXSLxGRrz/MXrqoqKtPUDRcabhKMjiKl3KEPdHE8k/LSdGcQAfG6x0qE8SwyHRrKDdn792eXF27rPcQzYTxEv76oRQ+j9MO4P3MYitqsyi/IHs/kgkGWJ2qide8KmbfMA6TV+pOmxZvBFUljGFQjA415QQAxIoSlJZMUxjinWmiChIjhHAfkKODXu5oan1x9Wl3x1xTtdnIIwqmnli5nh3TGXbWHKlGlQZ8jpDVDQyzN7ZXu+2oW8P2/GTGa9u81miwIk0BaocyI8bPx5kspdg8rcKLEcUCRO8PtwxOVoBSXFfRD/MkzC7ZlyJDXLPNMk0Z3rvmNGCDsKPvyhQBRAR9WPP1UGAG7LdVnPfTJLT6MCc1BsZxw/60AzJdfqtlBs7FAhW6bP6FTWCFyX4GbcgnuXI8JWGDa8U4T9SnJ8+k7rh/8AbjuINuT6j3O/p5GOJXG7Y37+H23vvYnp/x9PnPm2TY8xPazZz69uaNU6Zbnpm6VYw+sT/vOF93jHPgfN4MXnx9xXx9Nc3G3r151SSJ6l6xvdlR
bsZwI5eh0jkwj5udkefdA7phimgCpBo6swsfAz6v1jJfZ95otyBNuGCS7RuNoEPU7y2geoCLgKpgKBu/5HAJroBD3UnB74EJAIQjMJyEvScv60+t2giYvaG92b3nsKE+m8o+FU56u0DBYwCExej1pv2wSXq534lkuKG7cCD9GsMoGqxpLQJi2ad8TJb+MY9PtZOqvPokCE7vJDLWDrDwZf/5g9+mdb5C4j/GKDMuhAK3oTkWOv6cFnx2DRyW0nVMejVnFfBQNhQ7nBZNpnadlBBRXrgGKcIcE6AY1cRdy9hsz04BcYGMbdV01k56KLZnsVrWtNp5mpNSIDXOAKNExzygeP+F2Si+bNlQqd4Pwz66wtWUo04W9lS8Mk6kYLVMhJVS0kURG5xtmq/76DEE6DamAByGj0212llrXAqkIuFC07aTJTMTCJ138as/Py6KFja3qaHsm0MiG0ptKG2zmuYUa5Kuw2jkxNmjxQ4r54yvWpA3MvqBhKBcoN6gGvXSSMjESxoygTnUItljJBliniMVCAIuyjQOwTpVpykbTBpR64AFZuSw6eiWTUgfqEzWBDwFpRDabve2tut+htVrYGuWjjEjKnx0v7H3zLATZZhQug1xHMcN8zhAtaLf75jE4NcNfBwQUXCp2N68wf7+G+zPb3B7/z1zTE831NsNpVmhn6tlu3FcSimoYxphqFXM3tEaYd4reiN0FshJmHdnprExFUs11mC9bahPpl0HACoTc6+YZ8OoLid2dKuDTvVGypAkcwQ/6psyEkafwOplUgJ4QsS5wTIhQ0EkgHZIUVCBKVeq7QW9Zs8aPVuaNfQg/ygRQoB6pVp8YfK55NHNZI/a05Y1yICMuY81WFORdcmEnh9wdqQDWnBjBDCU5y72D5itNnf5/Eken24nVRpabbk+lS7ClfFLal9L5DsRAMKiBlGX9pnTajTqCmBEcOAPUHeEfugiUo+sCoTE4tl7UYorH7PDJ1E/mNHTpOHoKK8pSg3R2KjejGejCAQn2WiNMU3KiGtF7d3VCQDy4vs8u0F8oeWWBeFlYOCyQlZHsm52VXhfh4CaogwbYV88Y7nWtgozqCqYajrGUS5ON50vw/qDfONGlO1Ua4uo1DvbASFj15FH/2MqtE/wy91gu9G9jjKx7Q2VXWeMi2VSYNC055uVQYMw0xGuBwGoTsduNxs0WG879icf2f707HBps8mkU8DnBA0FdhtBYI61G0GhMNCqUc235ormADCdwqymoCCE0gXgif7aDSrrzsQTU/KY51z1JRfOnS7mKSO6k0OP2/dL7GqvAwbRxYrgXh/zoGTcT+Ds6FuFvB7o7z1hf94wjxPttuH2/pOPjKjZuhBCuct4hV7gTAgqYabqkbwTFHhr4N0CqPnm2Ru5O7bPvY9xHNg+9zmc9ztOV/GgUtCennB78wbt+QlP779vbRS7tVOEaLHVX1MOFtJ0Rf/nZq0J9xvGcWC83nD8esW4Hzg/qIhZYvVm06a3m0GM7Y1laqHFp06xH6/PLht2ukSR+FBr8pErjjqk4xmmRzkGRq32dR+gZm0Tyh04B0R9yvcERu8ACUATkwpEXWh3OgrhIrtQG9JJHnCKEEgtMCLW1UMFIHQ9qVbTXHRHXG8b6vOe2bL4qI1Yv3KMHFFjDjAcogUlD4Hv5aGXf6k7KqoFBcDGBJYCmsUys6If+xzvPj7VTipYUMDHZDNYKGksrP070uRF3w11AHIvQTCslz1qWVpjuNA6A/0ISMw+TN6kJBQRnfmWEZlzEgQr7ILlRmSKVDOz9NkjI1Wnmota/4LPxZp9gni4/t3A3Lw+NEwyicXggHDbdEnPg5Ju2UYUcxXQaVGdAsRWRM25Qxd66lK7WLpxjqB54daUHZiL9QSEIy9s5IEMPy2L05Crskgjo7yggBt7kKwJdLBj8+r1oIAjre8rRoEE7Vw9klNY3a4UmwNVasF226ynbN+c8WUis2sGj5NfSsxtagaLRvIQ99/bCUIwGIjDrRDY2BRrbBYQW4YEIvC03hL1rHl2M2QzJq8O8XqdM0UjmnnHTgQEF/c4Lt/9n49QMLhGzmGZFps8EERysjARnNrfzIlWk6DKmViw3xeZS2h3St6zCrXgJusjVnuMTFNKgdQKhYJbs1u+7ainCQETM9ptx/bk8N7NAofqsCHFcM/I8DP3N/IGSSgw2EiV0NLUYcFbCPTqFD+nJdUXgn4fBAolM6wAgb3Pj+pE8d5HQqAmXh9Xe24iGFFCAS4zA08SBtEEFbV6HiyrikzGSgs210wAjGkq5JaUzwUjJ4XfMijJ2hitLBu5fR36X+zNaOQNJ2WbZqbqhJEpSvZM2Vr45sosXte//dUy04r+s1D1ADJrJ2aICmbyrr/241PtpLRUMxz21UciZTikE7pI4aAM3bDJobYBvOlOfVItLIcqKijqMvhq9FKEk8LVOPkmZxtCmPCBZ1JMfmA4xESDXYYFmfj1LsJLCKFG1geQEngGddy6u+gcJsR5PzH2A1w55ZNm76kmoBfa8NKJK+BRrZg/AETRfkxTlj9Mb2/42PdSC8QhTC/2GZxxDsg5khUE9emgpaBUawa11N4bpWMiscOc4iKqVoSfGf2ZmnT13/VofQzoaTUhjJmd7xYsmIjtLD7Ezmv/tRhEKMqAVusLrYztZqy6/fmWTsqM4ILyEs0igrI35LYNtN2SqccElELZiwYuxshXQJQtY3cMl0QxnNABYswu4Dpix6ZklfWnDYtsx3wMBCJvyoh2OSwTGyUTIrj098S4dIOLBHKemINxuBxV1CXP24ZxDuxvOupts0nDXtOooUsHg62DdBO9bQErQXdw0wyAUsYKAFrUz6ZpMo6Buu/YTyPh2NR6W8u2G/TWbq5XGRJeAftSkDpW1gB3xsX7pUplTFcmJwLGbQMIqXiSwWRbBpujsdpUkWEK8KbAUPqG2r25eupa82CBev/TUIVWAYTARQAUV6OZQJ9QVggpJgb6NHWK2U0fc7oKvHcd5i3PIDqYImrBnVz3AAGs9dGuIOwU5fsLB8Q12MAMmeyDCyfq0VD3inL4ZN6QyDK2xqMd9teRFM2l5RijtskWJmdpRQRrJsrXfnyqndQgwiC2jMeLhvGwpbrgaFgHtbuDGk6agFpdgZnBStjJjFxlI2Nc+qf9yQOLdoZaW5BYvTWUwn6gTSIk6kByyTqs1+DyvAqsKcGLtq7+cgtjDmacT90EIwYSmlhsMWUKjn4ppxUHg8cLmFwU3DawEIoAUxnaXalZxCSGfDlLGRip8VXyUIaW1zhMQHe8HtZHJKb7VmuFbBuIyN+vZvSbtaJ4zx61kkOG1dWYt1uzWgkHrK2ocLjDZVp0zITLVnal7pzsvbOacCrBDG3bLJAoraDtWx4kqBjJQxTKBkkIVVMyYTYZq22Cd5O4Qp2e0EdUb0nFmIIxBL1bJjSHM7IYNr5dLNuULvmegXdqAFGLiA+NjXLZD/55tVwgw2eGiXgOj7hnOrpoCFVgDMw7gD5BCvT76bTojrY3V8mvaE872l6X9pxnUcN7nXTOVDlREZs
d5tAfgAv5JtokAPiIdBChbBuaN3kSyKFWN6Y+h4yiyfnrPAhYQwiZgVpt2W5LWX6exlqLNgNznDaGhWoBaVltEuTGNmovXnfUcQmLNViz4rWrgXEP1XyjkI9p+2EOwf2c1gt1DByHEWPG2bP1bhpwmJk/gWyGVrw/mE2J92p1U29TmDPPnIgpRQT8my0M4USiZggLKso+UYfNv6pPG2q3YGWKoIpgcM9+MWQt7JLVx/kOe7gbtd9QDLvmIGIIfwYyKfE014JwT70zY4rH+lr00vXs/45Q2QaoMQopqjup4lDFx+Gva/N6ZOKijDVSaK9dRbYRIp1ZK8ju84++r+iuMH/FCxW8dLoThRit/64sQcqreKvVDIpH3ZGKO5RWK2iq9WZUcV3S6dCi5FgAnYLi9OJQHmDGxUkdKRQb9Slmgpbi6gYAu5PKwrq/J5k+lNIzIRUjmmQm1apNe6WgVogL9Go6JRVrcEQCpeqlLwIKQ6s5c+XiUB+hbdV7akwDjpwYkBT4gFIgUZK09WeHKlsDiEFlPtSNrVduYk7FcOdkUI43dqpC2BogJrNlDnPtMY1rUGRWuggKGjEMsKqimaU8wF8Osyi5QCts38c+NCFfGGw3rAemF3IFd7uG2Yc7EJdGGg2lcQYoMmzSsUyjJ5dSXFXE6iNzq+u6QBYk5FkiXz8/u1xs5IcVPLJFIcgvH3cGAx5/wPIvp8hf3F/HhvoVVWMEhkqLGMRm74Ex3QFx7NVsK0B+zU7rFY1GZ295COJKnz6nzWuA5zDKuahp8o2J8xScfeLwj9EnujP+prgCjjv1UBcPm5OKFpoUBbMxzJmpULYnPELyKc+U64O1Z0h9BEtB2azJt+xW841ZauowP6Y8LjstaBFe9uBmrRymquNwtirIkYGun4HJvEKWDhN5DuUOnR0eC+Z3ZFBTxGm5Pm8GsA1MNmumkkUrG9vfFtXMbEUpn8/gHzjM5xTPrbpRLelidHhWElRinw5rm4eWwcYKRPLh8kVQTWiD2UdPl5qQGcd4d4VF6+fAODtKq8bcG0ZpZVGvCcEObmFQq87sUtBQgxOHgHU6DXwZR3FWmWVDcCV0c0oB+di0WrPlVuRWg2veaZgFPGNQ0yibZOKzqgwim3NVt4pta0aOKGwTc13Vwich2PqeA1JOZJYIGxdCWwWeNsxKKKyQXqy2Rla3qtvKCghOy+/i6iUEKdWFaW3mukFZDLa6NngqlGdmbgAwfHzJcfiIBZ8DpMFsjGSWJU+2lIsYauwEvVjbhIKvcDalMTDschmw6JWpddHhNUgAfh/nGKZDGPPDHFHobxWzdMxjYrxa8+359jAB0ufdRnR4fxG8GTzIATqHN8hWYA7MbQPmNLbkvqFsW2rGhdOVNKQLNsMle4kFExeEDuRhwXyP63VdtvxeMCGdrKR+eCNT7fc7IIqTX31cRUN9ejYY8LZnDSxUHKBIOrY45Cdj5iDM/uELxuuB/uEr+usd8xzo3frzpsAc0ph4eXF1jPuB49Xmd529exCtiQkxWUtNdYet/u9wDBrvlZ3lq8Bk70c8/W8IqIdliNHTqKNckBvPqkAOewu2ecPtc88AM3rvKE8bym3DuJ8uLDBWwCNB4omafHUCSsP+fEsI3YI1l+6SiV4+A7JI7z4iprLgRl1ZwR2U64NdNzIhCBDk8mAxEG1BhXIxEgKgGHf6oxFuZkx+FarLiHfXU5uLPIEAES+OKjMNptVDkBADLQp2EEaCPQcygoU4VO19HCpBTfaoU1e4E9laZgfsJIdaUTyzYVr6eTHWwARJ4dh4yDe54Ygx6QC4eU1gLmN07aAQWQVmJrVGX7FaWJBNStQIgp3ojj2z4uk0+jFMHNWzVy4F2Czy52JOSKoJb5I7qVKLR6eAylgZL8J/UBJzqJAPtsSFaMJWQ4smaFWnjqtrrc1sfFb1vaaam1Q9W1FIwoR5T65F7/j8uHHXvghjHvvIC3F8KZBTi8I43Ekxuohlct4ELP7+BQD1jgEFu7p96RUxnbnUgroVZ5ZNyOhQv3dSCNoHTrKaDzGhjkVSiOuJczO950scBga8vhsFfl6MwQdo87I4uSyezTyE95czarVTdyT30z5eDvSXO0SsAmS1qYZ2TmMTntPntjkhxr3Co5NyeC+e+4NXjMOd1P2073VDboYozi7oU/B67zjPgfs5cQxDLbrDnZHFA3CNSXu7woQgfF1hz3RU4fynRVI0Bng447cPlG5nRVzBImntsZ+AVKsJZXRVxe14Tgp7dyc1z9O0Nn2Pe5HVR30UbG+s52xzJ8VMObQxZpc1fAYyqXjE0c6UUy+Oyh3UDIqzrqiNA7cmy6IaB6yEh0hPHAIKKjtyfIMkNKZz+sgPh7Xmgr9GpvEPZscMYHwnDqKJ6mWGFz8jXlGySRaVZSxBUGfxmeKCP810GOJiSBddOYybG7hSwEooG0BloowJaWsj2+XF1foawBtVi8NKbKPqzYnWJH7Ymvur+vrHDClmwmAAMmDMKHjvisMOUSvye6KwkeIspnyO2m06aVk1jNKCbGGjJErlxUIDAPbGW/Hi8zny8AsiKFhOW8NwChL+JWajlsMN1lSnJg/0c+aohahbkEPH+fD9ZZ4jVnW58WWT6bK/KX/26KQ8i8wghi7kk2pabe68zLHbvLHBHRgTIxymKFRMX42nESFmt1pTfz2sf6wW1C32rfp9s8ioMCwYGANzayZrdTsg5w3zqTshYWkCDlcwENUF71WbCEBBkLi+18vR8VfPfW3ZEpYRz/3uH070kDFwfviK/vqK8+0Ljg/frppatVEu7c2zj7c5TLaqRYMvEgoVERdENuc3D5MH6x/aQMz+csfwFoIxl8LC4c7o7f3E2QeOo+PsI6feJlzn77GIITYMhUhAfRenfXkY+uG2KM5bsSkP9ThNry+GJw4Xcs4s1v7HtaCooqlif+9mzE4A/Thx3k+rP/dptP7TlCjGObK5k6shSrf336DuDbc3z66EQz6pYKKcJlw9ymegJlVgb4CAGBuUcejMIp2n2pFJwUkSvvELr3HHznz2nh214neSHMxQVyKoCqqYE9Fp0IfOgVkYk5Hsm5zxMr1nQJHUzOgRSip4ZlNmmJQCEtJ0UFxKfqbMokK6yB2UktU1p1pnfFBbfaIn2I1hRKXsjaxNQWxUc/Xrv6o222PlqjoHZm/O8DoSTgloJDQFFe/YFzcY8+w2zoEEnRU0Olis8z6IJyXZXOT+MPrXjJ2JaZNelQ2aAiEdFcigURkF4pGbMSwX/V5dOmDWYiSMMXztGWibj9eouaYhZpvK68OaO5dunsE2/bDZVpK4/SVbxgI9FUgjYfvVup/CDtkobl3qKRlAvxPswAMyETCxxxD2XqUIilotjquviTdeG3HD++MUlpE6u1nGBEgSaptnxyyHi8/ySuZ8fgBhOssRGK8VtRXM+z2nKdfnm9OZqyvqT3SnnE9xJ1UKSvPencvIkqujWkjJCkJT085ZBwHvEYIJB5Cow1zDJiy/3tFfXnC+fet9SsOp5wX1yaG/51eUtrnkWM2NHFlI9q7FJO
thMGl8Pd2JDQnCFnCfgi6C+zlwjolzCk4nNEh6XSD64AoY0QJeKGwX+5iM9YhsKgbARqMvdTuD4+woW83g0Bi1vpoB+RESKiYCFE9OLqouFG2lhDmmsYnPjtFNIDhg1RiY+PTmGXWr2J9uGWyMbtMKDO4T4OWT0fs+5U5Ksz5B5MxIN+wP0vC6GhoBIOSC2LviDfeNiNVhQmDVsTziZsCglOm/NzyqJ7hTYisOq2ajXUI79soI+ZBkudH6sJw+LHpESu/8XkBfCRM6dKdOGXD/EhlUyp2oS+xfGYWeRibUh1hEWBbna/lYkPYMcvrwwmpsuRxlUOqDk1pKBf52nOwwK2MeBHltQO+YtUALQeBjFHiN5ggjpZeeqihFLFKKT6+N1/L3FCK2ATOtbCNmSDndfk5gWAOl9UQ1GxdC5DI46o5JXHHAD9xYkMcMmG/O1N8jpXeyUF9DW0iDLn2vCTwajliAXZII6qK/K0yg2Of+PPEQJ7aQwJo8gzgj9jygoCFfRIiHrZ1OV/D2rARQXx+yNWYjt4hrRNoWtWiYyViO4tCtdO8NjPURa9KlWm3d5sR5P1wdw7OFUlG3ka0LqQrvZyFVOhRJ64/GZau3SUJ7cQbZDX/UMHVOjLtpB/bXO/rbIw131MzmsP6t2hW8dde/bF/dSbn4s0yxuWguezSnIShDTJ6qywp8xxS3LfpA3oysOUJQI3BF66D3/tHjngoK+EogvZdqej2v+BSDd0Rqr/2jCaUSQDDFnrpZoKAw2Na0Ie399b2lk2pHSztTvE/09vyE0iq22541RHanXZqhGuOzAPft/hGH2j6ZqOYU2wRH0C89wjAWnzmnQkAr7qT8OdQhQVUjWJwiGF5bYsBG008rRhepkFKgo/iUTx8PDYcTI0Pi4gbXC7CRRfkB5IsBi8LyFVqL58oeEWIwrTEBEVGLF/XnsOF3pqI8wWWi7AYtrQZ9AvksISoWsaHqJdOIbBPLWfn6Bu4dBfM5Rp6QzKTKVSE6Dp9nmEMwXwrmVoD7K3ic0I1BnTHJYKbizaAGA5Eba1cSd2ObKZoz1IgI2gaUKkKah0DW54Fi2UmoXddq16KaslBzDAtmRNEHXAtPcd67MbJeTp+W2o2q7RGlhBZiNOH6iJN3nXsGI3lnfVSMGzBR27vxu1UF1WuRQT82B2XvP1mAukgp8ewyGDwGpFumz83mQuWe84hZtpYN6lBkVkjq8J+zJlVtpA2wtpx9dpiHJJtmZzOG5ryfrvTebNwGFygz+hzoY+J4PdxhaTZCb/sNXKopyRRXTvA9pESYMKM+PPCcbuxjpEwEQSyr5zF1NqP5vHdIPzG7TRQI5RXySJdfhjH8tsOZadGPSb7esTaBlEiyTNesIHc+cOekilNgcJ9Y9jRkBcRWfnC5LxU0FDAZiasxo5aCjRmVvZZ66Xu067pAZ6JQtuucYuNBRAV69UgXu/G4SZHKMlxsj7W9ZVvEdERi9uG1V4P71KntVkMu2HYTMqit+fl3qNnXS1XBH34GMqlK9mFLbRFzkCWuH1GMjEPuv21RCeBqavQQyeRvuvGNCEVFrfbkoqNhwwXWk2GfV5YDCudSFkvoYryzEIzLhSksM1Kn1a+w9QJ9+C+74buK3CKyqLl6svQSQV3xtxxb7plT1jPIqbd+fRF1hZMitgKozpKCnPb33vPjOPTVSZkVNHVz7SWzMaPYuiqA/168oyxnET+MAwhoIgWwPBwNWnyOHfA1zLpNqakan8MsfbgjxvAI2HtmZKKfA8f9xOgT56sJ6cZAOklSjDuplLyKi16O6SFbBvLaok463VnNuJPhh4UwyJiWGpkvPFKOTFquAYSjCsKmNuJrUaZnPK6tGHXKaO4sFdAmCSvqFCiJDYJ0o7vel78eBZppjanGnFRXaSAAEywGzfK0eyhE6GOij4HX+2FCutMo6FwqxmFK7MPnPNnoENtDAsJwJ3U6hD/mRQbKnQVNAas5qYYF41PskemN52NmT5zByQpTOBfPRIeRN3gilG1WbdcdeNbANDO2OFzxq4bo4FJ2iLVEBqmEIG4xCggN6k4q2H2B+vie8r9dASSZN45/hr2wg+VBMj18PAR6DwbIv80MONys6qojjkgYu3hCZkVtM21LvE71yc+llgQmrW4e5Q9Fmzs+yeNT7aQ2ImxurDWhDyxcFisrWmKMADTwXkIhTa2/tIdYPCsKi+HpsTkqV/meNoTOy9+X18jOU6cu14Qz8NDzcd0gj9qCwErnM7W/OKuoJxkM9ljXehjf4PWoKIwbRdGfnxmkYqg3L6PKrhKAh74WZEFaVS3r8uixXKO4cKoJUXoPj6+MTjEY66zAGCgl6oIeRPjIA4NL473E3ShO0MCCPtnQecpmSoeu5GJUfKlDlZtaBe/bWjsfqIgxQfcT6APzPtGn4jgGjpfTxj68HAbp9SX4avj+TKbXVXSV8sDTO/ePLooWroCiDi/n5SpYALAaYYMtK4CE4skKNiT3puSuiqxehtUYxZUTYvJqzArLeqRnZkTW1GuBXZoXPCqvr/OEuGdkjdYkLh7MgOgETwV3BRVzqkNgtZgx8Ho/fQieoPhUgG2bKKVgq9YiwI44KPAwWuculiX0cFKqPo7FeniqnUJsQA4HZCBHwJszs8yLPCjMuLQLDJ87E3JdTsfPSWwsjZ9QOo+A9MOexL0NhZtxWTv2A84EVJiklA1wtlJGJbIWDOYU0M5AdUV/F5ux/hEOI2SeQmkie8+SHPyYVS274zJHHOdoOeVSK3IY6pR10PxcFg9SOSD4jPlX0H/Kb9HQw9/5O38nfvmXf/kj3/8bf+NvMBa+nAABAABJREFU4Md//MfxR//oH8V/+A//4eFnf/2v/3X883/+z/+vXysyqYgeZpxbJkxnwlQijFhsAI8YzMUZXX7mNhaVYPBSYUy1RtKi4dQyf/OHPcuiz5pmXU7Fjc5uXgyvdePDoEX4TOumq35kI15fmIOOHnBgMNFit14iv7VT/E063VtX+LcMP6/IPzKacMThQIx1x3jwBLg61MuGh0exfi+4MjQUBWpkm/ZnIfyLOW2kBQE2jsA3ePHgms0gxiuEU4x7kNcuxl5EYVMvaN6M6w5OithARQwoW/Zgc/IE5zFwHt0gvrPnILjIUHOEwcffnpTLeRfq+7jHuz8VhTsGqzUxbFK07U/bLwo4c9UHTKrVtAoZhb+ykUZKdZX6FH2tCTkzM6iRESaYMTQcXUwwXn07wGUrqYk0K7wW5c2JrHZ/ihCYFUwT5LXEIepOauL1cFabAMwDXBQyGZUFaIIadUmyszwBnGr1nfucGKJGpffsVaaxzEjMSRUYRFiJbcK2nzLGhVDh31l4RgRi8NqrXhC8dYbir3zD+/0gH7RpvxBHb/o1TrX6eDyP3Se4ZBBhA1DJnFTznxV4My8vh70yoIszwerphDsmrgVta97/VZMenkLRH4H8LkHw9bl9T+X31BrTqdhEhGAAxzj73PfAO7Dk4y6v2ydzP7/pTuo//+f/jHmZXf/zP//z+L7v+z78uT/35/J7f/Wv/lX8o3/0j
/Lr5+fn39BrRRExN5b/MwqMQYqIznvbGLTuw8NjObG4dUy2QYoywAJS2+gBMj0SGIDl7qIvhS//pvVjUFp99cK68f/edXyXq7v4nHikwoDv9pyMypT7LQxZhi8Z9dkrKXRtJEIarnyOyzXFNYaPiwm0j+mK/030d+RTRzShi6zBtKK6iO6dyRcZm4oCpLBeYI/iQWAFhD0DuDC7ky0ZzwU4ndw/askmbPi4DQCWXU6xERyghGkiSs9+OX8vSgvCUybkbKG1xHkfH2A+z6Dw8HwPO2ftRM+y7P5Hpmmvuc6+K6d7/QGOIlR2cWIuFhepwcxFDRKtRJaduVzWtc0hAx8Sq8HqygQzsMIFOVCD+yIAIVjGIlCrDZHVBlWtDtOnoA9xarZlGZ5nQ0ggxZTrVysgZe0mCVFeiwoHYH1z3vA9JY+ZEPsAzdUAu86C78vITLH2GACDALE0NuP1053FHlAgG+UDTtdFirmWHBZF3iHtcETwoBuUwbcRJ+J+X0sIcc3k13E5p4Q8SyVEoWtJ6PRqih6ym+texTq3irV1146zvcPq54XSwqxruDzH9bkTegeMifoJHr/pTuqbvumbHr7+x//4H+M7v/M78Uf+yB/J7z0/P+NbvuVbPvFzHseB4zjy66985SsAItJARi1x2KAKYWOoTP/cL8aZaR3GlMMB1kay30QhsgzFX4vUDkF55zmsb2mJi5pVoKWp5XRYJZf18ZuUoziwNkc+MptaGygYesnM8TlOYXxTuThrYnjcjZdMEbmR3PBEFJTGFx/dZbljaR1Gf8L1z+v33nG6ZOsgMaqjthzjwLcNdGz2PEMWk8uVoedAFm9tXiAZa62ZRBQ8i2Ufy86biYbCo3BiBkqxScalmJ4br3xY1SjXyHsYVHrruwrjsGSFDN4bveQQSYkR6HAVCqXHJbw4ljB17N9vnhVyFNMvhq2bx7Q974SWEpmUKsb0abziklQEbKVgKwVaBAyvA84JccFPGWK6etsiKLA73FIImGSZ52BvC5S1aSKrBZKUdLpTmgTodMPq3J6A24JM0J2WLfMSEioDugIEXD9kEYlI3PElTGfPmxJSsqD+BUzQCjrJ8yayEM1P6voDXR8KDS+znEt6t8uNzedHPJtluGrQXjL6vHYGzbm+YJguZSFyaBKufoOsVX0cXJxlAw5yCTKwDIJVqaad99hvqMmGhTM/8z1kcLveG1326uXt+rqXzNw1slNc1lE+Zr3CfmQ2+PUf39Ca1Hme+Kmf+il86Utfenjj//Jf/kv81E/9FL7lW74FP/ADP4C///f//tfMpn7sx34MP/qjP/qR778LoTB0RZqw5TFHYzFqpKO1FJfEsSwnHMm1iG1RVxhtBlthwJhCETFz4k5uDC8UWUXKhZgAqm3yqB1RjOu9voeH92NG7ooF27/EagU5b8qd1cNGxjKIceBU3BBYAy55OpRZ0bqEPNjXh+KdR0Rjlw2cf+cbMbI1ujyDehbFbJp63NxJ7Tt4P41BiYkcguhrOVP70JXPJxmjjNRGwce9Dzp9q+CtRbqNaHymZkY52FoWFdqtlCoubKqoe0smEpGNRGlnuyiUm9EvoZPYO+YBCE+DvjyMvk5HXvFmmlEQoh7hPVIcdHRKBzBCHQBWJ2KyXicKJyWC6U24XvZHgWCSaVvKpNVjpeoZFuX+LA6F2jXLJbNbAUvurXwH67cUwUw0KE48qyxqTjUIL/7yflZNvDleJwK+QvbBAduG8/Q9JsRQNhkzqCAUO9lcsT2zCJo7xlKK1XW8npNlmLz7j04qa2/AQ2tLQF4Be0cMd/24npVoexmXWlRO/lZNZZtCBuGac6IMvK8EiYezHbVut1/RR7gQFK9B0aWVgygZsNGfOHvBHBM8H+Hq0PG7mqGPSaXSBjwEE75O0GA+hhyav4cQ5C70zhN97cc31En923/7b/Frv/Zr+Et/6S/l9/78n//z+I7v+A5827d9G37u534Of/fv/l38wi/8Av7Nv/k3X/V5fuRHfgRf+tKX8uuvfOUr+OIXv2gbmCk3vi2uPtxYm2ESRgpZ1CvMLudvfxmCs+LRXggxxg2zQrNm1BTF0agHhXM02+RFaNfSso56hshSTqd0LCa7E9FF1oJ0QQshhKuI+sQECzmhQOyIBpkgN5Q5piu8AB9UR2xsHV+y5WzI/g54TJQ++lhOczm3ZYyvhde8M+EN6XLIagF5JlWebuDjtChXuzMUefXCuO7XHDYJ2NA8UzkoZWWDoVDN1ZwUeYNmvC5KqC9wBs/OGfFGxAndFC36O/z7c0wbRzJD6XpAhivA9w45Czq8d85VKIJQQAgD4hcZ+yiDKVs7UY/y3ah1mGEbErOGTIGg+LWT36ghkvAXYMHaJLJeLTI2FalndxyajIAWl7Op1ce9c2YmYYkJDqkzWQ+ZOsTq78ETnQzuhiKhvyk2SUB9HlFUfghsTcps+7t4DZGZUbj4PLDI7qIny/5W/D025hQmZgATAonVJEKDOwAnHFRmN/7BB438QHNWXDREwwNIZsnglfO2rdBsOSt/Jv9sa4HMnmLqwpxR2VMPTAzaa7SyTWuNCRtzOZfpDRd0zS7gbBOGXSQ54PqLLWG4Ks4YSx2j1ouk0TvEhzjLul4+TWw6KF+Qx5+azRH1njQjFpFfP0eA6PZUHwhXX/3xDXVSP/ETP4Hv//7vx7d927fl9/7aX/tr+e/f9/t+H771W78Vf/yP/3H80i/9Er7zO7/zY59n33fs+8fQFS9ROwg2KfMS1liE5ZE7kDetlJJ/N/1w9WnK1aIrsnhETP1AeIFQiaAUM59smF6qBYgVFu0mzizyMxGKkyiKN71y8mNjc/CKvgGAimG/Kpe9pJlRRZUG7jOpXJxNqF7MARkdBjXa9wMmfAwHATjGHLT2PJKxia/6aPHPizOjdz5/7INgDkoaym2HiKCcE/UUKJ8Y8mqSMyLeRCveOGkfIK8T+GBH1YqoRVkmZMSIgPaoFq8Z8KISU8Tw6oGC/WwDcg7Y7APjjTUsRtNmTMcdZ0xsvVv3/f2OYzNlhv6CpP8HoASOvRIfsUP9/xngKDDZen3UghxMyR5vvWTKZkgIfA041GsZUYPAyhTUG4xBPsKBLcJGnZBaUD27JIUNp4vs4XJTyRGDZaQuEX8YLI3aQ+yEQCO8RuxBU+U4R9FTyGi1mHMpJWvKcRYUBComTcYOpTUhDAJEebHMxCcZgLB5BlXZiRO5fH7tsa89mCMiCEIOzUtzZAzLBfmF37gQlPL+IRt3z2kOqofqjUhmZglBIhzTqqHTJZO5ZlHRPlB8NFCpjLYZVb/6LLOHIDcyGRXI2Y38xcC4mablPE+UrVyynevr4RJUveOK/IsoraggnbwpcHSMl7ujC0dug3q7GXFn30Gl2MiaT/D4hjmpX/7lX8aXv/zlr5khAcD3fM/3AAB+8Rd/8as6qa/6yHDDD6FHeGsbBZZt4wqCHWNEise0vLsQ6nRYZcVLyOeI+YnLULhxScdICzYA1iFWXeMoyIr+UZ+CbyjHF9MQ5GbzwrUqOwPYnIxBWFjOKWGt
+L69f3NuVi8h2CYFACoGqIGCLu/WyOV0PhIjKdZX13/n+lxvSwBCj+n85XjbWvClNtUaqDZQEXf+6krrQZ8XXAu88VRJ8nBGZZAiIoOKfyeGn1JNV3cqq4jrWa8qsnu+XpyTjaiQHOk+CqFXNvRwDoxCIBnea+VOigh6HWJ5zfxzj7GDVQphEyrmKcaUE/ZI3vYwl9U0Dvh4TF/WGFkTtasrwUIzS7I7K8oQmrm/Ja6HruhE3uTHHZFB1QWSgmelfr44f2ex9AiememFJMIEchmryhdnFufl6qRyZ8V8Lof6PHsx4ylOOfcpzdfnWzvwIZC6vt2rg7jaANspa9fQ5Q/sSDz2RuVHUOQTsr/sgccIAEEuiOzj6qToohRSCidTzz4W1RwgY2UCFnRrnKFpI0lcrX8N0bxcwrqUhxV6Jxa17yUiYBZviWyLowsnxuv94a95bgAxqJqc1Cd5fMOc1E/+5E/id/yO34E/82f+zNf8vf/6X/8rAOBbv/VbfwOv4oceUaw0SMLlx2wbO3bPoMvoczuEU4xl1OfE3dUGrOnXKaVsB8Jsvg0Gs9Pl9SeKCa5ryxpS4k4zlQA8InU4RYtH7wgnlF4qN2NuwMqIzGhOG6sw50jmITe+fJSc48KeUdlwOkV/FcxSULo1snLlzOqMGh9O0Q06LwbbijvxeJrjXTtc8oARfMy2vjo9w8wVXBu4CXi7gbYT6ArBqw0O7JJZTLKhGK6OTj7vZkO57ShPO/hpB9020LaBXRQ06k8B9wJxqNbVERUjvxUboa6iqFtNIdHoM8uhlXEIxzQh0dcD4+WO182mIh+7RfVzSmbbCk45xKVMv2jMMRNKANAQlDkxSzHViIOznhH1leYDNQnAlKV8EMMMY/RM9AdFYJEsRaeHh2FHMVgre1ti716RCbqwGj28Zmdj1lKSmixpVEtC69WHBmY96JKFIREE+92A8a9O5eIR7HJRrF4nXndThZBD8hKvQ4vkhMhULg5G15693pd0OrRsyxoxcnVTj8FOZFLRjpi9bx4IT0UGuiw2c60gmfsPUF2IskYwx4XWYNWtmABzIRRXH0MBqJITucxJCTwDjBxdFJClWel1jcvboeWoPnJw6fJv4BG6XvvLRvd09LcvGK+vOD/4IB3hPN6g7LsJS9eKfn9995U+9vENcVIigp/8yZ/EX/yLfxG1rpf4pV/6Jfz0T/80/vSf/tP47b/9t+Pnfu7n8MM//MP4w3/4D+O7v/u7/+9fiCLnWY26Fj2448h1jc15dSbm+afIQ9RjmzOiF05VijDVcQMDpogDyflKcb/0nTSEMm1egyxtk+Q9vzx/9A+1WzNnyQSZ1SCw0XMz1X1LFg97oya3yBjVZIuUIDrAzJidUZrpo+m2nFRSweuq2XApCYs9WIpLmB0TUBWwWkfS2QkfDccvbz6MX9KdTcVdFBhD0fvE6eMPkg3F7FIrZGvztKHedpSnG8rtBr7tVodqVmNJDnNapmVQ4u1kAqtw+FNhvAK6RKB6+XfotlUbP14Yo9kHaGIexqYynbRHJyWuBDJnPN91KVbzZx0TfQq0VZx9gDaTYII7qcKMxstJSUx9dsUFqKKIuuoCfECdX7/fP4LDQWS1TftsMHCMLMlb5pl91qMCnQjDza7W7Q4hz4RH9lFnCijL+pQojvCFTIRkpvKlgH/NWlZw4ZQJZp+/Z/R1y8w460xxfq/QYZ5n8mzAjW04mRW8rD2vbowJH5NlvrvFCTabjYLBB4NrNeyTfT1xGW4YWorFHfqlGTYp5VvNMTPE5vGmionnQiHxXqPnsLLpVxS7nhAHLpXB1WSPqFwy4jiT17OyYmdoOqoViNp6BMISiIdpGc7eMe73S5AJzG4SStQqztf7115If3xDnNSXv/xl/Mqv/Ar+8l/+yw/f37YNX/7yl/FP/+k/xdu3b/HFL34RP/iDP4i/9/f+3m/odRY3R/PrBb2F818Rz8O6+6GNgq/VkoPtFgeFl6QKIlbyBzkp9wJ1RQ0rDlW4Trp8L5/i6sTU60pXjL9YZlT3ZputsM1gkonZXakciro1cLVR69wC3rqk4F7X0aHJAqrupDBcCaO6GkZhsNRFNlAkA3JlSe/eBHNMK8Ba5iQD1fQGl4cfhOy/8fUUUYzhk0rPAbhxpspJqy27jbFvTzvq0w316QZ+2g3r3jZztNVbAXiZtI/FLPJ2ulEs/p4K5z1KEo0uhzWHUc8tmjXVdiLBODdTm54xQLH4XjGhWvUamzoMtAxilPPNSbUxobWgngO8FUi3umSFy+cQo/i6i0qyDmMyMg2XCBKbEWXQztqAi60WrE8PmOJrXwaEs04D5hDP5VwxgMoI4n3+Ll964BK68z1ydT6PQsvL2a37cnEcAEDm5BSrsV4EOaX7+ojzGx8BH+YZdQid3Wk8bFR9PO+xF+K3PnIi4nk1nGPAlQbxW4LjDdggZ7LC+uyYQc2mRJdrb5OfffKBkqmpp9NhfPG9OZcjTh1RAqHA2LxzTd2tnCjNqmM9vpF0SO+8P3vz654oFtQc+yrQHiNpmCo8xCdqh5OqFeelrehrPb4hTupP/sk/+dH6AYAvfvGLH1Gb+M15xKJ5dOU3ivXamLuciKh4dmXQCQBIzVIqmhdbr9Fq0H3t79dGBbKDBII1OsRsMF0QE28C5ov+VnitCK80n9SinlZRbhvqbjNaQN6X0ruP+VYfv2CTbCkgAjgTUFyJWb0b36+6+ITf3kpmUuzCq3X30QT+kXWdcCoPy+7vwdmNXHwmskfeZuAAysgLHznVV6hUpveUnAPn2XEeHZBphJOtAK2iPG3Y39tRt4btvSe05xva8w31zRvPohpQqymmR2gMABR9Pm5aIih0y/Lgv/LeLGMc9yl61Mq0eVKzFtS9Ynbr7JcxMI4zHVtkHQpyyBBeD3D5roxUo64Bc9JT8PT2QO/d5vh4XYzdMK8iOxKKtOnII8dHxMe4n9AxbRzIxMO8ouv9TNQ5tqJapO67KbUDr6wuZt/jV+NtXsZJPJTL/u5rRumUsW4NJ4nInyf/bmW/8W+LISyDUNIUn5V5PZ/rEY6y+AuS6qU2fVmLwGIRAYrrYOKxtkRkz2GBXFgDa0IuYo4bIIwIlMTazxR4aC6v+55jLTafSN22lvPAuAajzxzzFEE/D8xTcT88i0bIKhFoI1Tv7yuuqUgk/nwF2/s3C/D2hroVn73Gaa8+sjEeXPLlB7TyqbzP2WJC3kRuTD/xycVUKujDV6AUvL3/FmZS/+8e70b4dHEI67sPODTgNScsNQki623xA1Ad5svxHZHN0uqof0Dz8Hj73J7Hic1rWkXRxwJu/HU26V6ikxyp4POSSJ0hNNkbW53FxZxOTzWEZefS1xoD2cNARgudh7OqClvzaymQMVG2Cd4FRQAuAq5yMdy4fDY1eCrIGtbF0mfmmf1cmTnGp2VM1L+2orM4iUUcQnRgwZuAy7ah7M3mFO07yrat6am0eqs4aOB6dU5hNK9Wk4DL/afLO6Dr76h/V9WzdhNCTIOqgAwbvhgGNIgacCdlwygvrQF5+5cTHz4
KhErB6B1trxiurE4e9V+lucKIhpOSmHN1doxiTEEhQGfJgCqUz2MdHhL8gL4cZTDEQfKOPdgyijO1Hjms07wXHk5KLvPlDLwDNVEYuwcndb0z64xdXsn6B/0NhsOVMKSerSdbMKBLXO7B5RFOOmjp4ZxMTRz5qoTFVkS8F782I2uo9YrBSzMejBRXhKi1om4NzSfhbnvDvje0vXn9ySF8HywqKkDvGDK83SH7jS2jNMMFbsXOCBsoQC6BZuiMnR8O9ZWHc5u5tn9xsZyZ3fqW9aOVgXXYOKaUaOPCCXXObsiIdhv/chyfASe17If9i70KGQ1+YQRWTQkwI86oDhmgFEwF2uV5o08hMjHo6jhfEi7wZ3vHOV1fi8JQUxIdHh1V/IccjRBaaeGuLCJlrzfZ6ZtRN4jGS39Bw4MFc3R3TBcnNUcW/UOIU2X6+tkIC64V7fkZ9XZDvT2hPj/ZpM2tBUs+33lID6WD4wLabLVtdpNZi8jscs/nZ89M4j8/5yZ5oz5vx7TjCly+qBSUfUN9fkZ72rC9/wZ131BvW2aCxrBWQCcmJhZUiTSEcZDCWaWRjF6m9MLITCusZd5XUm9MpofmYfWMJt/rxfnE+07V8ozWL7+mcIejePIpqN2ZhDkl+eo8gYuY8MQ8zUn118MIHfcTR2HMw8Q8Zx8gr/PFe3GLmoGaYunN9blGvAO2PDFc9OFcERYZIk6FxpsDEHViOw25b+1eICPwIPDEbKfL0qeDe8znQxorshxLnYOylIGiu5QsqxCtzRjPFA7ZndKSXbJetes6AAs+jJl2xhCVvMJC9kubcrSmGyRJhLoVbJs5pOf3n7HvG977/HvYnjbstw3705Yj24PBOsWnGb/ccai1Y8xSMH09aqugvaE+7Whbw/60ozVGLVF/MoivPd9Qtg31aUPZQ3B4BSuEr/K43Nbr7+QRY85rrrcdOgfG083aSbpg3F8TyhfVz0gmFYblEgYGZCAeKcVhTJWISFB1HTIF8kbHzwBvepTY9FacXBna0lVbmZFeDqz/DIG/e9H4mlHF+wgrFXWKaN502vOcVmMoMfbCh84px3MooGakdE6M87S/u0zgNOqpq3Y7XCjnmbUt67uo2O4d9XaiPg9sXWzD7Vv2i+TBZvJhcAVFTBUZwVaioPlHJL0ifvhTWG3nQo9dMe2ClTzDUX+vyTz0Ghp5HU1BmNPGK2hg9FjbIhwTiN4ZWXARWCU/yEHddhKFOMU5srAMHGM/0Ypy4YER+dCuqwkMI6vpsPVdG5lbIeqI3Iwl2HpzKr6sqPXyN6YAbntmuJOqW0XfGvp2QEUx6mFqHcwQJitmx/68NKSHg+rTIMdjjDTacYY2r9XGxFjAdNwIenFUFFGHZ+8RsfMDSnCFhsNz6cVh5b3LxfkYIwkYCcW/mQy7DDItcCnstSy/NPL7IZ4hrWzJ1SJiMKHaelw1HAmmj2h+SS7Nvpz33d1iOis3TgAT2taw7RtuTzue33vC7bbj+fPP2J527E87tqfNFepLOpAxBrgPDAjaHBiiaGOgiICIcXva7Tnf3LC1iv3WrO+sGNEomL/ldrMZX635Gea1/h/dkZeFpnxfsQhXlMnOUIGGkxJBe/MGc7hS/euBKRPH2TGH4PW3sib1/+wRLvxd7OHioK7fDmeRBsYdiBKhgpYBCWVfmo+imUAexGAPAZRNj3GX6WJI1muvw5bRJ1bUmVCcis1dcaMzp4CHuGRPNDmtKJMc3oMAqhFN+yhrL1KqKGZAf31g3A/M4cwbsQK7Yd8VYyjaOdGGQpVQWoMOsTEMDMCzL5Bld9xMAw+izgwkm2PEwbsEwrIEBLMi1gtzzqHOGGt/CbUvB5yQEkfRExKsuGFhhDHowuD4ahMWgyzmVwWDiiklslTsZ+o1BOvBwRpjAixB2thQDDd8lIxAaHkHPdLH8/9x3uny7aCbc7VGyzbdSV1gpbXDkErsy0mNZZQKW/bEhNk7CIRJcKWBeBu2NkZeib5BwTkn7r0nA7YQOVmj5hy3yBB8A5rAK/ycpepG9OcFrRorUCCy1oxY2zjS/jvXqQEIH31xFNdbsYIhLDFXe2JX1rffE4Ur+T/2MqWDcqcVGdQQwTklnVlmsgRwSIzpI+wXgQhBwR6oZYZYCrZWse0Nt9uGp+cb9qcdt/efsT9t2J53bLctxWHjjVLvwFlQ50DrA0PMWalab9/+dMO+N+y3DVuraHtFK6asw5sTpLZmEmR+dpPc4lnhRx6PG/cjP4qADfDzUhiqBsmrCOrTDeWweWzCBRMdp9um8/gtGtXx//RxgWUUK3pKNhP8IF3gFPjNYDeAhaKxly4YNOLYpWW1UdTrJi1HEzmwm2PCdWST/+7Km67OKh8a0bVAB2FioN+74czt7pHdhMiWDJ3spyB1hpLNAYIItBuzJqbHyjSYQMa0kc/Hgdk7ztd79kxYRFWxvwraU0d7MzFORd03yJvuAaBR2uHOhFtDqc0UI247UC4GxWs1awnDYJiTkhGRv8kKzaiZAQ4ZNNRtAComlxPCuUBOjx3HadAVs6mFiGL04SPcndXoV5MOySNHrj62oFq3PntvWm0lC9alLGeWM7byMK97uDI278XS1RixwpLLb+vlO1ef5z8mLn7NC36KdbtEZPnamZn6+5cx/f0ZW0zGRPfPoxygWoxgkfsunJ29VofikIGjD7w9Doe8JioXIxo1yzBNySU5fUit8nhe8g6gCBhjKjUjjXXUaOYF9oUAhZ1C784x19mdydIY1PxeNqjq5Rrib9j6ya71SVJTU9eA9bxtYPjX3Z1TF8ExxUZtSNQE7emLK8ST2j0ijTuPdJKRTRUyJii3gtvTjv35hqf3nvHe595gf3PDmy8Y3Lc972jB6q0lbQufFbUPgG0/t1vD/rQBcCe1b2hbxXOzWpQJ1wJEao6qVatF3TZjEm6merPk5r9aNvX1H4ns1Go29enJVCUEGEKYXMEvBxQMuQ9MZYz6GRgfH484uqG7F1F0UEvj8fF01vXvYOjmocvQ//FvMtDPoCkKzRFdrog76k9h3Fa52P9WA7zwbEPER8BPg9TO7pVPeJTiDgr80X0V2ZjXnqQPk/Zx52TZVUc/Tsw+0M9u4ylUbPhoUVA5oVQBPlH3DoAwWzFWMsOmmnrWI1MhTcFHB5itaF+MbRiOH4hsCRe4T73rXTCOjnH2nHAb2oLMLgMjlFE34FnSGBidQHc/XEQY7oz76WOtpzmqMJ/hpKo3RJZgPwV9v5iDmps5rLabYnipgtJs7LlqWUHGJUC6Ln9syHSPtL53jUT1na8T/FU8fL5GuGbrI4q/qIIooGRZC4vRyUOJgD7yUcBlQoXXwclrWN9QBQRBYLH1ZJgzEbFZ1peGv9zX6+1GVuEguXnSi3OOhvjHpteIzIUs0CuKlU0FwuFOCup9YBKwse1lONKR9yOOlz4GnlDN0fM57kOi8fbxY8TPdAU+Cf/rdX/r5faF7YhMytsH2AYZtlrQtormbL66mxMpWzMHEs38vlcE1ljXesUczQI4mIPmwtiawXutusKOWuBAeiGpcA
jTXunn73imr+Ko3v22Xr+plwDc5clU1EhOtx2lT/DthjIUfDst8JTPgJO6ShtZ97lLG6lBH2EmCzl+rIvuag9ynT0/VNGF7Z8pI7LH0fP5+lgOKoyO6uMMmOgZiWF8j09gkBrUmiox1+vpeRrMQlZvqX1AdQ0Ko62AYgPr6okhsQF+enbIceJ8uWN0G4E+p6XZ4+wJC+Zsm6JgVghOTGGIMrht0KmolVF9wI05qWCfDVDpoNiQbUMoaUhMxnUYygxIHGi1etmYOF7uGMeJ8/VAPw4XkDVJorbVZLNZz4fV1s7XO7h39NcTgNUfx2nj28+jo4djHpIF9DicdbPsorY1BK7dmr+esaxKK9ieNtRm36tJB/belatCxzuZ1cftkWQ5Yhmxj4P8rsbiCo1eNlhmWwGL2d9FHuOUcL6QdOLDCTimwMEgKZdsI5yggKZRqqNuI84UVZmumcc2+TjHo7hrIvYDec2mwngLsgvJa1fq51cAdPUZUz4Xi8jG3dcqqKWYmCzMJ8aI+GhQ5ji3qqn1uAgmvj6+XjLVhG/9OsSzpwyaZA2yFHdMQxfkF3BfBD5VFawBdrpokxfHIpOK1TVRZJ+yWwr2VrFtFfvesN3so+0b2t5sP27lInPk959NbQU6wSRoBdZErlYvbMWapivUJ02P5Zi2CnKNy0AGcu5aerCP27uf9OF2zvsKuDUoMaoq2hQIFez3E9o2nCCUPtD39nWfFfi0OymE/BESW47hb4FJs6dL5BsKQKpeZ7XVO7YflAUuUZGjA7b4WDdPPXNb0ZUauQGr09ye4MKDukTh5tcicvV6jDjseJIdOLIDNYcx1Wa36hlmgVa2DEcEOjrmvVsD3d2Ujk349EA/h2VP05k1Ixh/K2omt35zKniIZSN9YJZi7BwUcLUmP/eJdh0qmOcEl4F5Toxq2Rd1cx5zTPv7UGBwOvPsA3MOHC+vGEfH+XrgfD0wzoHRO3Q+jozANL08yPQaC4OoJLLT+8AcYlN0x0T3D1GL/sHRDGxjyWut5qhqQdtbOsW2NdRWsD/vaM0cWAtVD8+4uKwRCdmbdqlzXvsgH4JUz0CTAHFxVB/J0EBYtcZl8COzCq3G3EsRoB0Gd5pU04H+crcP3wvj6MYAnNPXNogMK7Cy/kFC8w9WAkuodatPvfU67RW+VoXJP3n4pg7iqbq6xsryQQKhaU7K61+nkzUAQi0TbVZIFZsDBte8dFYrvE9QltZU1pXCsVOePTVVDXePWXcSufQRzotjdfgwzmgcW3jySJRtAHQ51g+WKe5V2A0X2C3MST8vLhu1nEbYGmuotlr4hYxFtoYWjE6wDJRxmkMmAsTbaeYEiTkpmzhQwHP3TDYIP5ca+Tvwde7Jd77Wd78gPDTvXzc8FSPX1K2hPt2gRLgd74O3DVqrneGn7SOv+XGPT7WTAhZi8eCwgBxrHQ4lvg+sCMe/sE8BGeh1o8fCu6EMK3FJ6U2ja0EAhOi/wIP6RTqni4MKdlG+D43sUKEYIOXMFkWskA4RjEKAVKAtJ4XhDupjPoZ/SAznS8IC1in0+VQyHcab5hjnsK52YYJw8SDZQmBxWEWGUUzFHdIkS+NF1F97OEtxpDEdvZuTevuC7vWzfhgrUfrMBSG/RypiqguDAB5ISrHfpnEavHeeA30M9CE4h0+sVSRjrDgcWVtF9bpU25orSlsjZW0F8+z29bFh3jpKrWi35k6qeFZFPiPHII6kq2d2Ef9eAUkoRX9EUTsYhtc9cqm/aKwHrUwJXNJIiks2zdMh3ns3p3Q/bW3dQQ2XmrLhgN5wmSQcN8AcihbsTsoYs5VsDlKw+jgPR+DO/v7IzHu0Aavvc1JjzGoyWC2rGdODimlyUARAZGVP5cLIC5TDpHY8WrpkUiuDicDBnF5cg8BG2EcwO0IZJJAXXX2Q18fD+lD0QF2dNKUjc790yXhpQdjMCcWWqHUG7HbdIyL+HMt+LGTHnBTJBGY31Md/R4GLk5pArYvQoXjYk/a+rg7q6+RNHwMTfBQP8GfyPcXeB6YA9veeQa1CC2P2iVn5q/z14+PT7aR8J7E7iYDX1PXaVl3ANrn4+RfAMisg8WSJTe4bBLhmwWFwln4ZsDZ9RlxwplKMjY+icQxGzB0SIZO/DaysTL3DX6cZI/LBeqUVQAWjVejoqK2gFjaugoaTMhLC8cEL+nHifDkw7laLwpgOYWJllMSrUdYnCzM7VVjUHA4biSM2uVFtGVAfGigEHYB0wbx3QAnznD5eY+B8NSOZMKPXOMZ5YoyB+8sLxtlxHkbyUDWacAy/q0zZfB1eNTTo1D24KiVRYoxpxe4pJhqswCmaNUJmd1LVHVZhtGokiVoL9q2iloLb04bWakIxpRpbyhhXnGKfpUafCaejCXIA4PAHIkiPWTvzss/MuHEpacCCbRXCtjZA7h2HVsjYlA+Z1ER/NUd0/8pbHG/vON7e8fLrH6DfT9zfvlrdb0xEO4TV6JyGHCxNrGF8GzNErCGzevLWYIYjJI5UXa8vRsf4exX1/RGQsiqYZgZ8IgKB37tQ5p7GqGWFjZLXiVQ0ADJrQvRvzelyYZ6p55E351FKyb8famf19Em5XWyfGCvW1TwAF6WFrwOve+hZYQjVNh8nYnvV6VGRXdkBA+DkG7YaaNs3a97dm6199XYFEczjBGRC+wlxNl7xURzEhOmBaH95i/ODD9E/fIvx4dvse4wWgeKOlkGoT08o+w3VHXiWHsJOZRD9da3tJ3wsD8hcgEZoDv8RF4wx8fT+CZmC7YOnT/SMn3InheycZ4Url1t678flAmUBlucgVYdXgHERmPWP9Sex6S5O5QrX5a9FNLX6PjKN/kg2hTQuZnsvkJ9DQpKQRVB81eR2RNAJ0FGgxejEpAKdpjAwe3fIzLKmwOcZ8brstGBTjI/3Y4a2+LA5zwpEbZbRmJjMYJpQj5DUo1swsu40+wTQbd5SH0YzfXvgvB9O2HAnNQd678tJdXdSnmEwGYusshEWclx6wjFR3/LrV3JmGgy+cWhRpqQCdawic4ziYMi0OoFOYxBKMWcuhUFzQlq1nrLTMq2xBQuQUXyAW2kt61TBVgs5IHNYvmcuUJX1ckUzthvTZB2W7NtSn5pqc7QkQ/SAGc2x2B4LI91fT4yz4/jwFcfLHefbO86XO/ph2VSQUwpxrm3ubUU6z4TMNOq4riTuZycy3WxOpnWWIgOU6JGSaEC2rJB82nGAWdHSUS5n6xLmeUa1smpkQhGv4wy9HCq4InmSaKDiDEYflMmz1mT1rRjxof4Z5MaeA3jThEQL+xRhX8cHEoJFE4jeIa4lIebqPYnRIgDPluXswBzQQtDptU8RaC2uBmPtBXJ2zPvdVMbfvoWOYfWncFJxfaXaJIDWFtJzQXTCrCVaG9nfxzzezSw/+rA/NojT1k0dVWKvq9V9A5dp508UfX4GiBNARAy+0MXmyhS+QH7hdDzbWv3gC6K7bt5rw2REVQ9hht9gde8XEEBE0YXXhNGr7Mzq8YmG0kh1deGQcMWIiHoAixingD0TmrVD+0CrjFFcfV0Fk
OHjIwb6/cD0Lm+IbZrKNaEYg06ieG0RFRXPpKJB14kmOg1CikF4fIneUZwkMRQgM5DzHAARzuPEOIZF8y93z5YO7/2aOLtlUq+vrxhj4Owdc04ogMIVrVRstWLza6lxqFTSSMUOUKXLPURSiUPk1VhZYeMlI2wmOzDjHChMmMyQwhhMkFbQK6O1irNxZl8xt8cUQEpmVCHLRGS832xGxTr3GgQSz6QgknukbSGD09JJQZZorNVM/IneYWcBjgRMsbaDs+P+wQvOV3NQLx++tfW/H0lnrz7kkFRRAnpkNibbFIw5bR969rqchvUFhdqReC+URK1HLk5OHJJzB2CcVHsOBmy8BFkDeEMcE4eJiDJjKx6IxilMKD7kv+ZShsizq26Q/bOSq47ratQNeDGUKoKVW/xaQWbwQYwKD+6AdEoPs68+hkgTdcvSLBOvW0N7upnu5NNuWVWrIDU9zj5P259QVyovmG+eXE+zQmVinqc5pw/f4vjKV3D8f/6/1pTfreeIAM/wG/anJ9DWQPuWDfJULpTza4CS9g0LFtSP/vhrP65ezteiuIABK4hNGLv5Xh40P9GzfqqdFEUmhUsERtF3YQe3+8YN8UkrZFpkvdxEFEnJR6mtRtRwbICdi4zC4ucRKV3ghZLO6ZJBxTVHdMWLgiE6PTO41MTcURnyZoVfqIIHW3ZTHOOGeCblE3injTkP6IPIHQ4CrpBUoZiKhCIjE6jupDgkaQL288iVPQomZjOyzFDqIK9fwd9Vv3tE/3LH6dF9PztEpjPvOsacOI7/H3n/Emrdmp2F488Y7zvnXGvv75yqlH9yKUgwjSA2NCGKQbSRYEBL8BZF4gWDgUSEeEkahoDBJAoJKqJRiGBHBcWeQW0EggmkYRlMQlraMBC1YSoidTnft/dac76X8W+My/uuvfc5dSpa/PyoWbXO3t/aa83LexnXZzxjR6kVeykBGc+po6WGnrvGsDmNnI2d3+PhwxMedXIhy83DTsNRgNessXmXFF6A5b06oREpDU4hyFFtrIHKFHubvBeXKSvmrMzrFMUpweDgHr3nTsjzH10iT7GsC1LKgSQkJgv3Ge+ieVX2YFOI0a5lxdFeXnA8XFCuO47LVSmSSkXZCxxhSPYzpaSGEIBGjNoFR6k4jopaizF5CFLUF5nnBli4To3B6gZCF3Rp4R24v5QjZKvGjlC3sJN20IXtCXHlYIaeKgQrGhZFG2LaJze8ehamD0XFnotyA8c3st+XRSrcaySPXNBkV3oYD+HF+l7PlhPkyYMaaMqp9ca2Wbh4wXJ3Rt4WrOcTsrH5QzSv28oO9AppGt7nnLF1jWQs5w2A5X9rhZQCOQrq5YK272jXq44LEfKyIFtX6sXmaLSueRLpoSGfwgjwXz7As5qH81aP2TkhcSIyOg62aIiW0gB5+VIATohTGEEXNjAJhmlNQgWYJ+IJM4eXfTkChNPMWEgiBGKEBQ0sMSmhGRkVkF+/Mb/QvCjI7mMOLeI23BLFgCJaLwRYfYvR9RBCSaEpdb/0Fon5CD+S55Ik8jhdxMg4NY9GT16hpCzuD4wQjleWozOI7R6Z0MtAcx1X9arK5XqTk2qGqKq1oramoclaFZ3nuSZL9JEQFjAoAYr65mCDEIGWkYr/PhQV/NlNMHphtdh8EE3WvI+vWY1i89pFn7M1t461mFl/FyuC1Fg7oiNwtg+y5UApvHkHmbjnomE0ifCqbBUpZ0ipQfrp4cFu4BV4wz1XThM5qIcTy66F0cUg/VGHVhtarbFXetNuxN2BByAUaPjrqA2lKoxfunYHnqAVMZi9m/EHbZM+mvsN9okI2ZHljTHHMMzQ5MnAjPXK0fiQ46oeEZFYjzcvXxgYhoHvL3FqXUEo6BFwNA9pPJ1FaFxZjT2tQAczSHkiF46tbZ+1fKX2P1ssD7ViOY2cFFvhuPQGtK7h/FogpaAtVlC+aMEtG5uL9wyT1iCmrPpxoF13RPxFBGDC0tvEXs/jRTMqkebZuNkpH6SgbCe9z19k/JUm45CmVIpomcmHOd5uJTUdLmQIoygXZtnV5glSHaA1sdLoM4UVpyEJ1m8L37QecA+qi6BETFu9NbXuNSQw98Xx7wKeD/k8M+4xLI9jmdUNu76Q5l2IgFocWWScadNLN59ZSp4rsXBEKEALJVnliypxU7Dw3IpT1QgUiVM7QDU8mkFKaZYZCCCnOurqNRkrhLIgqHLypoGR8HZ4etO5EhPeHRZGStaKgZ0fz10ZldkuJD1lE5AXggJLAGT3Uu1vLkzGNhu0V8mur7yNsFoencEO8xAgA7lpgBNwgrA2iexMtj7Uk1dPWEJhrqTrbrG6mcSMbn3Batawn65JC1vWPgwPM/HJoMUz/UHv3tZelVQ9jmhzr+HCQVkUK1Q0PFo7cHXAyVFQjOsx9cGWICbkPHgHjI6zpXdUMWUVwKPxXXIwk1MdTSEyYhrRB1ZVES07wgtyw83Dn1WFu+hLTCmOOZ5N1PEek85xkHsToVte1ednmZRjNEqc0aGBylMhOwVKhiKz0G1aMrZ79Z6WuxO2V2fkNWO5P8H7OB3XC1rpOK4F/djRjgPJlBStG5C1bU5eUhDZOvqSiYMNHx7CdwNmW8Hbqs1AF2uKGm0/+KkL9Pz4PJ7U5z2enf82ZzdSHh98vNVKioAbZTLniuakrrd9EBPgjcTCgsMC0lBDB4EN/kmRx1JPbIQzKqzudgp/BBDDrHEV/yYKCKGo3CMDEICJOOzfJJNKEw9vGbRXdYEDa8HSQOhgqz5X68UUCQ2vKJQUUYQ0HN8u0RV2WMq+ljwR7rdHrPVa1KaQZuzSUVDp9DxKedTDKwikV1zNPR5FSam1PcX6/ROTEAu2OHKv1mh1ZLbahjCW6Vni+W52EAW7SFj+Yv8RX10SawrS4Ygx4QqNAQqE1QBoRFYIKsb55tx3GtqipGSuKSlyjpjRNK6L1oHOTS3caf16rlQYIOGo1zHRCyelrdXqpOoAkDjJ62zpexjLveOOHrmoaizsvcvwNq0Y3Q0guBfrawTT7+TGvINrSPOXFgIjTgH8iHzujIy092Keeo+8o/j/pKnCElW+wytCeAoR0bBn7oCFf+0ezXPr0HE1u9XWwMiD3YybKSBOGo70vSJuJNtnknUPUILmBctpxXpS4ti0asGur0k6TIGTNceEkhrLjAjG2GtMfg/qrcmyIK1rmMLaxUBf6bQhbasW8+YRNvd7nffCEEez1pWn7+Clhoj05F8yvUfPP2Dj9vm0pB5vtZKSWZgDcF6iuSDXOblqbdFLh80yyoBZazDmarWXY9WJI8NUQVURFPvZzMpwIejDneweyEJQmgBHhDt0Y3HA3D1sQQb9fdp+J55TYD5DPKzB6KvlV3pAZ1Na9Fxm7fmmciWiVnEHYSTjhThoarw2y5Wj9qWaciL+sE8tIc8R9B6homCbmCHU9pwMMpi5Kn82Ao3MjIUTsqEN0xSGdCXluZAmHRWCOjmhbr1nFyy4tXb1PiZWAHeX4XUyA5DDMhSUf25mbRd0HTvuACmTXQNwiODoCoN39Fi2e0qL
UQ1p/uHz9TZeITNu4ed0b42kmsEIQ49GTnRsDHehHY5+iYnLrzVo9KyQKU2d+tmu9RHe570DrExGFCV9HNAIJkGjbnnEGdG90NDByr0tHUUqBu88z3mflallrTNsQk7oC8c1a5GKQy4FqWhXO8vH4mRtj/2A5IvUPuu0fioLOLPeL7IrKu7e0Nx33nen37gv35jvsnhFrZBtE5EHWQxTuU1/o448tf5vFVO6l/82/+Df7CX/gL+Imf+Al88YtfxD/6R/8Iv+23/bb8/e/9vb8XP/qjP/rwN7/lt/wW/PN//s/z+5/92Z/FH/7Dfxj/5J/8E5RS8L3f+734K3/lr+DNmzdf1bGkUGdAMl5YPHt/biaB85WZogAZNSngkjikbI9I8c2jFyEkFosrDCMwo7BaiqMZXIiqpJiH3l84KY1NAt5kfoy4DItlgTUgyzNbL6O8+OzTcTzCjN4fpWT8xYeHQ62gE6gSVG6ByMlzp6OZEgEGzQZS3RVqFdoMSi478/ZkTpF9RQ01fRwU2GczbzRAQwK082jVQgZmnp+dvs6+e/6kQB5+P1NbP5X4n8sLJUkkXht1iSZevCfhoflMoYRtyiMDbXlaUdaG9fU1m07XVxfWWC4kRoRzyAwje9ESa0HfLWGWfnTstw33tzdsL3e8fPyM7eWO29sX3J9v2LYD+7bj6HxtDxgV0RMIn7wrWCoHQbZanQ5f3EH5V3dSufZVcYyRE5Ob11MWAcTVuD0Y93qGByJxLSOgCJThtGPCCaD6NV4J711eX7G8uuD6wROub65YXz9huV6SlMN7GvBnx7HtXqc7SCt3unm/7xgHi/XHfcf+fCPktx+c9eRySVWKZxqY8FXst9yNAojOzMTPIvZ7NOa+y8yeyhTTOeX+DMjy9H7xPKucc4/D0Rd3WEpdwOGOvpYCa3oKiIPWT0JP/qIYO5TPTsqT9+jBW9qKtiyoLqgLgOPs94N3MOyK2eylW2LWFjOp0Uc6qdvHz1TXd/r/OHhvNOBo1zwczjTuZzWpL/P4qp3U8/MzvuM7vgO///f/fnzP93zPp77mu7/7u/G3//bfzu8vl8vD73/37/7d+OIXv4h/+S//JY7jwO/7fb8P3//934+/9/f+3ld1LLIuKOuSYoglvLxLtMAA7QMyBgY4y8UgbDiDG3Sdc5wO5ZRY9XutpWQGojgV8nHajKdISs3QnEDRh4NlrrvCRmxFt1jCZf59CRguRCEF8FrChBfyo/0xHVaYAzbvAsfQ7JMKxxYjvAXBUjvVfR7eaboAnOoJnEFkECvQXiHo4HwjwI6d8NEYMAi0gAZk75Th8WZK3Tvs6MAg9FltjhQPvD4yuYeHwYkhSKf9zsVwZ3MOWuAMSgBjBhgZGRZhV30tHAPRGtpl9RqSK3D7ZhTvGSL7rKJeFqxvrqhrw+WDJ9abvK4Sf5sj0U/HOE2h18gSEu3Yn90x/dwneP7wE2wvN9x+7mPs982zAvY/2TG8TgE0oyENCam1UBB2qRXXlZT3VltCsKPUCTHCjaGB617VhWEnCzJJQhwCxhzV4OrxvGnv0ZP95bzUlgtN1oraGifsvnmFywev8foXfgPW11e8+qYPsL66Yn1asby6JqkkM7ZOGHR/2XD7kFNxbx++ZZZ+3zA2Kmn0cEz3jRDg0dEPnxvme5YlgVmPe4jl3ZmgREP7RCjONPKCqJ3OTKwGw08Enw5iBR7g+n829+uncfCG+iBNNaB3QAR7764CUjNzE3a5oxUGIwCgdUFOnnboqHoNNcbZ1FYdVr3g8uqJ40kg2G8bjvsByAv6PlxAd6STWtYl2xMCBbg/39C3A/dPbqkT2W93bwvgvYlhoH0YjkHHfP8KvdRX7aS+8IUv4Atf+MKXfc3lcsG3fMu3fOrv/vN//s/45//8n+M//sf/iF/7a38tAOCv/bW/ht/6W38r/uJf/Iv4xb/4F3/Fx1IqJ3hKYv016wnnzOBhw/kjoxb/+lDLATCrj48m8cFxALloxWGOwAIMQBmBB8egQR/UZ15riqgrnEZhVKfx8Tg3sQaEcr4CsyYQjjYi42H8ygCKzi/nAsUZPESSNjfpDPT9q0NKZhhO0UuV8wciQvR4nIRk3xGWndJCn/4wIA2f+VWXL/ViYBqbOgvy8Ig+gb8ing1LBgbweU5kKjpMd3H6fKVBFSF7NKjlQZFu1xWrN99eni6oF58guy45Hj5p21lTi3VlyVQbfRAG2TlSot8pinr4s2/RgNpzomvAvWRC8u5RYJcQ3+pisIuPCM8MSor35sChoghuTrVL8ymvmDXBklmtpeFOAkIY8HeW5XlvSAFrKUtDXRYs10s2fa+v+O/l6UK26Lp43c7h8PN+Ne/DiSGGN9ah+n2DboSSx+ZTZQ9eM3WdRUu4DjODivtzOt60E7FDnEhhcS1k6npKLD3gAeKjLXgIn06Z+7uL95yLWX4f2ZQq9QQTQTCjY4LBKmm/NbL9dKIOfyOkvZj51VJQKvsbm/egtcuKtq6odWFAP4yzsRxMObbDIb3B7KkW9GU9wX3c19vtjrF37Ld7kihGj6kDHuBGv6b5VGgzbPo1clJfyeNf/+t/jW/+5m/GN33TN+E3/sbfiD/7Z/8sfsEv+AUAgB/7sR/DN37jN6aDAoDf9Jt+E0op+A//4T/gt//23/7e+23bhm3b8vuPP/4YAFCvq1Oe2+zhKSXhFFMDNsJtY4xsxIzu+xkpOXh0juC9pyMc0mP7zVxQafSKM+YqqNYggqosbIbUUNCeAUbQHL/tml++yN+HtODzeqZzjMwJNjOoblFXM+w2C7HAmWBx2hSnRY04B/+deJNf+GnCEezZUXE6+hhoZmggri4GFHX6q7KOcBwDvfvE3h4jMc7X2K9pOkObRv1THlHzoMPlzwJSq4uPZF+YTQOAjgIZAwYFmemenfp4j8V7k65vXqGtrCmFk0rW6EkJI2tP1wvWDwjzrW+up7HmM4PK84nmYdgcD+LwR98ObA5NESp5wcuHb/Hy0TOO2x37M7MnHIMTXY1ak8UsSUIigqWw/rS22Ty8LI3kiUKaP2tPszfuvC4iAGm5GNgGUEXQ1PvHSshwxf2JEOK8G07LSgRoXoNaapIknr7pDa6fe43r597g9ec/wHK94PoBr3/13qg8DBOITgq6DuW4e1c3HxtV3u3ozLp2zndS141jvSbWmWQ2kI23mUmdzsWDR4hhiYAg9o/D5fGzaOg+O7Czg4paEWK9MiWbThJhP3zfOaQOzD7Hzac9q5lLbBUMHbjUBm0VFyx+mytCqR0VHEE0NO/F4hBqi4bxRkSg+OgTVWDfO/Y9oHmql+hgIBXXLQZCirBXa/g6Zn1wT8UJ6zFvTX3AqWIbimMo7p2Z+/0rpPf9f+6kvvu7vxvf8z3fg1/6S38p/ut//a/4k3/yT+ILX/gCfuzHfgy1Vvz0T/80vvmbv/nxIFrD5z//efz0T//0p77nj/zIj+CHf/iH3/t5qZw9kyKX64IYfBd01TIURRVlVIin/rMYyBtYjEy3KNJTJ8unqsrs2Qn0QoDZZOqP4C8FJJUy9
0H3VYP6jU2AxCnppZZc6AANMYNVhx7do2a2d7oGMTX3UMsMKsgfjADTC/nzJFUkxft+BFnZMXXlc2Sj3+FNgwZFtYpmwOIqGcMIXYjTn9kgaJToGZwdFZnhmf78QOM9X9h3fFTWcKrT4KXMayx0UtWlmWqrbIYt/LvQu5OlorryughrS3VdsT7R0Vw/eEUIzwkPRTiXKUe6G9+vtmjGpWpEOzedLvWBsefpORtyh7J47Bs4+nqO+55O6uWjZ2zPN2L6t+2BPg01l/kqQOW91Vr9erAPavEMal1WQkFLSzV3deh3HzPjDQPN2iQvaMxHk5MxToalvn9fzsY9siY4pFRKQbm0lDm6fvCEdr3g1Td+gMsHr3D94BXW11dmryEiGxN6/Z1FLR1xBlHOONDe6ez3A3ZM8d2AoFQnbJxBxklrsoQjVZcBCOcrsbTc6fipJ9qBc7bkNd28HvyFo6OOfLzv1OO6Q8jCK55lhzBxvFcgO11jHAwD3ypT09KqD7uMBqwIdA2+v72Ha1nR1sbG++g/k8LRQqrQYyP864Qn9ZEv5pME3nVShpIKO33vRAaOOTgzmo2zf1QJXR4WZRXala/k8f+5k/odv+N35L9/1a/6Vfj2b/92/D//z/+Df/2v/zW+67u+6//Ve/7QD/0Q/ugf/aP5/ccff4xv/dZvPYlQ1tkc6ZAPJ68WSB1OHa6pSxdwUCw8kRklAbNZs0XWcIp6YtE92FKPhALSe3RUsxEOIhNywYRW4uU5TM4XuZ0gFQSk5q+Pr90I2Rw2I+UZn3i2F5Hk6Tge1ARKOCkjPBA9U5ikElJdowcrKPOEL4M6K0PTSR4+riFmLVlcj+o0dkRheB7Te5DRwzGSsp9OCgCKs6pcPTykmZhNA6NXRpO1OAuO/2ZEeUkjef3gNeqlYbku3NhAjh9HNDQi4MSYd+RZ24PO2cygqCcXENVI6C4p00dnvcmd0v3ti1OmKXs09iM3PFlpkmrYkWUHOWhprg/YmEGVUjliHY+1yrwXamiRuct0RnCjnNlBBEgyi+ixjmYv1dw0OdnWr0t7WggpPa24fPAay9OK6+de4fL6FdbXT1ifLg6Tzr6xeMO8/6es32KJqpIG73Cp9UmOeICgASdzyMygojfN93n2BZrnlob8XfW/D55LHJfg4T/nrZbfmH3az6etidE7VZw2Hueb2d10UmqsMdswWOFQyVYEQ4WtG4wo/f6cYEPApa2oclKXhrauGVCF+HQfg/qPYzgbkrVjPTqgFHAOWr02BcThY/Vj6yPJP7H2RzZ8U/EmauTdOLcrWdRfweNrTkH/Zb/sl+EX/sJfiJ/6qZ/Cd33Xd+FbviAUlhYAAQAASURBVOVb8L//9/9+eE3vHT/7sz/7JetYl8vlPfIFAEAqM4MY0BVd9K2gWoFVXsToV60hQupRMjCo/OwZjRpbbcOokynFpsjqGxeni2u5eQQxcr5jNuiSoePFSlM0FcAEycgFmYMx6bYBGclFv5QCqZSwBz5tExJUeWQMRTYo7mhj/k+I8bYiqNELdKbsO8MwhGtNQcaXKl76wDAWciuAxQBZnAXUKsptQx3snVIUqAm2PRSnjfUfaagiqZCsR9QRdmcJdSRPP2GRkg6g1ABdPGj07Ks29mddXrGu8fTBK9LIPVCh7t3ho7HVnQwbbtdXAdm9QvWG5KArb+POKLYTShJ3EtoKbLQHDUBTh2qcDhxkiNEH9peNeP0zGVBjO+bP7puz1XY8f/yM/b7j/nJL/T1+LqP6VhtEgDUCDUTPU0FrLetozSE+FVfoUHVlAHuoCw43kuGo4OvkDL1SwMtwVnUvEoruMp1UEQrMeqAYEk+XN1fWm15fcP3cayzXlV+fLliuFyyvLo6GtIcAyg8AgUxkU7pZZko2xnRUfq3G6Nl3NCwMv9uHOlX6a60z4Hxv3vn0K9Fud34CFISO18jpd2d/NIPa+fbweykCryUFKSsckXj2y2vcbEqHTaTDJonWN72Kk5qcEq8+qDMRiECclga0RtsyDNu24Tg6WaP7TobpfpDY5PCyAGhS0klZOKlSs26WDmlMiaVDh9ehZu/dSKg5goZ3PPiXeHzNndT//J//Ez/zMz+DX/SLfhEA4Nf/+l+PDz/8ED/xEz+BX/Nrfg0A4F/9q38FVcWv+3W/7qt672jqfL9T21Pm4jWLWlEaDZSpoVRn/BgdFApQTZJpBo+eqhv1gMPs7KD8dYzWCjc9kE4qepVC9yo2t6kXk0+QCyPXRzrsOa9QzEhE43wlGluREaM5BBMF3JhuWyBUiJBzD8eE0h4i4nBWmD0zUfDspmhDABnY+8BydNS9Y9l26sRJpZNCwThOcEv0GRUhhOY1EhWBOS6tWrIvijhewJHllKVEpDvp8WRUizuZ5rWNBWWpuSZaX1JxQJxMsFzWrEFl9lULgCOp6kGxtU6zNDxLCMOotXDgICJ74pOsMgqdbm9ZVN7ekvmUTuogpXq/bT5Fd8tMK+CqOMk5R4trCK4AEmK/rTVf4y0p79OPu4Gz0NdzY++IgqazmXAURVy5ytXXUnPDkg7JjUzoDJKEwnuwvLqiXhquH7zC4oy965tXTtN3yn7ApOXUQnKCuWiIg55vKSIbY1eQAYI5vOQwqO+RaPwOAsZZ1kcEOCUc8zPhDkAwi57uheJuqP9syhDOnRqOKaHsx4/I7wgqxHr2YAEFKprOn5R2RSuUpgollRkcnJiJbhPiKC2m78beOTWmq2fUQxWbT2m+bzuOncGiHodD9wQsKwRW5/FTUGAGk5TKQq6XrEO58zrOxKrcs0S0avkaNfO+ffsWP/VTP5Xf/7f/9t/wkz/5k/j85z+Pz3/+8/jhH/5hfO/3fi++5Vu+Bf/1v/5X/PE//sfxy3/5L8dv+S2/BQDwK3/lr8R3f/d34w/+wT+Iv/E3/gaO48AP/MAP4Hf8jt/xVTH7gNmJTnzLoMNVJsyysbe2lqGOugaVDt4Asw5UjkqvEK+PgIvPob7sGk8npSeVYm+IlOLOiZlU3LPAxSvgqU1Bqeaso2iudbjFZiEb/t4AHVR3HHcfoS2GU9RZZhHWabAxiqNiQpYN/rMTxh+OI1iFAFKehXT5gUNZW9rHcEdFlk6pG6IPTaSirR11V6A0mFREDsDeCq8FCGhcesdoFXoQy+4ikOESG3n6ZY60PkFpsGi25KN4oNyWimVtHBXydEG9LAicJhQP+LYlhwuyaZR9O3xr6pmhY6pp35jVhPERAWot6LfFnV7o30mqRsTo9mPbcfvkBf1+YPvkRor0dqDfNq9NHbM59b6hu15ait8iHLWcdNtmVDvrBDHOYlLM1QAxHncxkO5vU87IzQwON+JBSIFFQ2kEGPxMCGaf2GX1QYfRV1ayrtSuq/eOEdpbrqsz+a7JjAwx2ehrjL0xNzadxXAGZA/24xYCsv0B5qNyg381moPhMCFJECUde2QyxHQxUyE8fp3/nBB+1JrOryNawesdQW7W8uYZzY/x+xOafPwsrxOak2KU/W0G4FINBSPr1QIyOWNq9WSR4lHf11UlSqM+onjtqqti
2w8cveP5+YZ9YwN533eiKEM9KPGerBm9QoTOSwogGrJMPLlgGXcPbLfe00mdrwHAwJ+3+StzP1+1k/rxH/9xfOd3fmd+H7Wi7/u+78Nf/+t/Hf/pP/0n/OiP/ig+/PBD/OJf/Ivxm3/zb8af+TN/5gGu+7t/9+/iB37gB/Bd3/Vd2cz7V//qX/1qDyWHmxWdTZMhQtkcQpBaULzYr51Rez0ILZmWVFuwbCO3LNbWU4QZ0NEYfWZUUlxaSdBdLLYDmLeD7yduICrOEZlHPcrP1tOSDjr7xKa9GBvQXtQDHAYLzD1mL0WP7WQgISHAT31I7FgDipHJVQxF4ukFXYPPVmKqaMeASkdvh+spVaAapNBZUXTTM9JaUKuQ+Vj4dRgYTQ+bNGmB1zses9pSPPgAIOKmQ5hViZhTnd2QLhVtqZBlTovN6+q9JRSAdZXt5tDFCEgpNMwonGkuhDlKQS8Vpe4oSyULKqxTkWxaDghvv++4f/JCyvTzRmmY3Yv9zprKYnMfTqTBzCjceYxSfICVwFqlOkepPg6ChABnwGQWMh2dR+6QNJ50AJ47uJNQiZqoR72+YGorQGuQy4L2dMG6NhJOlkrR3LURcr2s6fjXV1e0teHy5ikbnalr6JJQUXuyRHbdOcqsaTg1n9nmnfW6Zzp6QsSu3ef9qgTvA/qWKLJNpADIjHLCIZZ075N5n1kVwJqVeKuH12Dzlb4ncgl4AEn2n02KukwYPrMwHzNfRCjMYaxRV9+Ji6/9i9FZNK15L5dSSJQpPrfM4bho0o6ZUFST94kE1e3T0XHfDuz7gdtt90CJ65H1J0UQcnLdeP9XkknC0QOwwnpvWJC4fgHvDY19OgPkAgY9OlO0L/v4qp3Ub/gNv+GRqv3O41/8i3/x877H5z//+a+6cffTHqMrRvViXTXI0XmxaoFWTf0seN9UaZVFwEbdqDI8UhDDeWgg3gnumK5SsWA41kpYIHqf3EnZdFJ0HDM6TUfhz9yUmAy+cz9X+pOA4tLYxsovmRUQSiyn7GnSZMNBOgDk5xJyTDNKFP+enijqcshMq6pPB4W46oClqoQeAwMFpXSIe0hpdGYFsdBDyd37TkqBBgxRpqGM2pqEUfUNzmMMb+OHmcc664iRHbJYXBPmyCnBznZKsk1hYcAU0BE4v81R814DEXjmfnSMvaLfD6eWI4//iFk6zxQ5DfWIvlPGR6PnyRty+d6nibmGxzYEP15NSMwVIyprLGxcnlAOYl0FJBwQJ8JRBTxjvv484CkTmJIAzGMB1Ep2ZIjAOu2+LQ1rjHePXrOTPFRbqMbB/ig2QSf0FE7YIaJMAWRCeDo4Cn5sPsTwvlNZYj+YRXmjry9FrmkE9B1bZ7Lgylw6CcEH9Jn/RNRM4krwDxIZ8fuSjeE2r1cGAAjZsgioJulqOiqZ98H/jsoSJAVVMPM1AKuviarBMmUwsRSiE1EvTLsRa7uxH6otzKQgrMV3nz+27Qf206wt8TaEs6kpYcfedVAIJjJfaI5eyXtr8HSdfH8WoWyXgEHfV/L4TGv39fuG3WYRTseArg1mLhvSkIKPsgjaWAARStAbnCVUPDOajjcdSXyXC3VCfhGQkSTBGVBktnndpxRPiR1Z8N6j7j0D/bQoDO5kPNtqYKpewX4VK54hNme2hXMq1ccw+Gb0aKy4HMpZLNO8Z6sKcvbPkLBtXmgVKji3InTkVbD0AZRCKZ4+ctFe2oK1Nm4WE1QFRIE66PaKEDaoRVF6B6xMmaZwAjnWhAagq1Pp1QDjpl2L+vWoWSeU4sp/1aikvVSnnXsUPjjsj1g8syvY3MRnupYZ8SEdg1TmyJxYfU/jDgOsK4aHIWZGUsfLlutnu2/oe8ft7Qu2+479fmBLaI+KJ1BFdYckFlJS3qTptcaonw1YQkzMihmyr07oQaAHnonAoRaiNicatkUmTXQgjGqs+DBAtTq8WAtkIYvx+voJ62XFqw+e8OrNEy7XFa/evCKT8sLpuan87rOyot+phWM61UOQNTykYT2z+qIOeNx30vGf73j52Y+xfXLD/aNnHM+bQ7ATdjehyok5VzyMaAGwFJIUGhzyTMp5LgCcPBP8VrvrkYdrNGtm/PmIvwecfBUEK/YPBqKBGmQJnGBN7lfzdVDMINpQVFHHgDhc1o7u95Lny70VJJYZpMZ0ZNSK5jTz5Yk6iFILxbOPjv3o2PfDA6oBG4SEg2CV/XElJLVck9AdYo1AUOZljFllaTv9dXGuUQdfas33YMD8Ncqk/m969D4wyoBVyuxL7TSidUAXJeyU2L6z7NQjjcEGNS1k50V4RBhpGrGZvUh+PRdL45E0Vo8eTJXOxU7pr6rTgfmM9qCezmRSgVlQphabqfg02AilSqoJ0Lh5ZuFGEOL1NbVpqJTQHQwo3j11HtRY3BmXUqmWbTSGpQ0MEdSjo0dGKgXr0rC0BUubU2iDmHGO2jEGDa3j3fDgIJv+xqwndCU8cKiS9QigKDOdJszoUIL0QbXzYP8ZWAOk9lgHNwEoHjtqtiBU41dir5aby5LO7MyxdCQZB9PxO/15uHApenfYaWC7bTj2jvvbO7ZtJ96/HakGX071tGKzbvGYIYqrJMwgaMRnQ7AgWJwn2MkDkmTwRQBmeJihVBA9gCTSwNdarQySWitJ0S8+fPDpg1dY3TE9vbliDThv8aGNq89PWpcpquuqEfn5xrrxJDjxcubMt1Zyf41BCHS/bdiffdTD850KHHeqSVg06QKYFDmHqU/nGvWiFFOOTMHysj3uYeBxX+e2p0MAZlav4Gea7zX19a4w9lzS6gDCenfxv50kpVmTEjgxwts/xKG8ouzrGzqSXg/P1FINRd55BonIR8LUxmbd4XVVx4IdWg7tTq+/Q7yGTWgundPpeR6CGQouWbw4HUcEXeGwiju+cFiAYPzfwu77Wj76caBLRRlx0RwyK5T/EAH0pKPGAWqsV9ioUBdaFE93ApvOICtvQkBJXpyO4TtIlIL/NkyDUcrDKIPQ8+vuqA4HcNOYGym/zBrYOAq3pQ1e8mFxhti+63cJ4OmcKxl0sCHPMzvzonJsJEp9DWZSbhuKCsxn99SlpT+unv21hZHXcRygAgNpz60t/LoshFar9+iUwvRfAaj3G4l4/5YXxY/BbvWjc6KsN/7ug7Ip0WQt1TMvAansjfcg6lBlaZCl8RoPpVxON9S9oyxHwh/R09Qurj3WZnQIwEdfE14a+/CLZxmjAGfn4ZI77ih6H+i94/ay4TgO3J7vCaccUbeyEwSLMFjhoIJZ6sGKr5fD1wSbuGn8ZquBr74IjM59Qv4easjslePjZyRcI+AqQmJLJfEkFN4Xl3169Q1vsD6teHrzhOurCfMlZOrXsa1LUtCTsDIsEYQzPT+8QTIDT4FGP3yA4Sc33D56i/3tDfcP3+J42dCfN4w7teAY/M1oXeBTpI3BWMwlW+CEIZmQ3AxODKcNzr0rEdfKw74PIlZahLAVFj0/cZ15n5ZwcF6rFkjWLuHQ7cP7IeImXq86BlQHxuIDCWO68OlzzscYTdRSSVRqKzPd6lp
73fdU/G0xKowk2QqTDVzdyUw40Qk69ZGNGZfhwbmfsqbUDT1nYaeMS+3rAO47+sBRehobKWTw9OIFWgjqMlCaoWIqpM8m4AapfRZSw1H5IuXF95shNetXjOZj43tUD0OVSbvOGTriEX5AMQG/IeNz5Au9fhZ4cvHpgiRNgPUBd8BRzIx+kaEHhiAdARBzrNzweXFT3L9qEc518ffWFgXRSkVwn11jpjRkrWA5CjDcSS0LaltYvG8L/67U3HTZ0HqCnc4tAyNmBIXOlzf/qsOhkiQA/7uAD0qBRQpSWCw3BWTrqN0wuiHGUhQflRG1krpQVDMGD+ZocgPra4frv7mKAboirb1nMgh2pxvdoYreO7pPJj2OjvvLhr2Tpt8j6jfCPxCBhb5gmZTo4ZAdMyBJxWzzdRl4furDRUAEd+LqEfIpWwjHxPVrDq1VmK+js0GLRui2NCyvQptwxdPnKFl0eboQWg5FCFWMnXC5FiE06u0eEeqNPvz68BrF+BaekpDc5EzL2G/7ztEO909esH/8gv58x/bJC4Y3OIe4bToPePuG2IMBrl4PbbG1cPZHklG/5s9Y1yxxn4Gs+8JrnCaCIWxb6UapNSj7m7JuGsgNYhwOCU+zhiOJlAT8LNHXlP7PoIOBUD8OjF5zYm7sqYTz/WqeHUX+NJ2gw7lCRGIpM2OOgYrN11XLtTadChU6YtyLN8sEWsPUYCYIYL15FUzYtcx+rzxJAHqSHfhyj8+0kxqD9QeC7C4dXxQlhDtLoWafgAVnIGG/aISc0U0YRH+EAciEVrz3ojhGA4QzYO+CPkr9nKKygNQe4I545IbxYyszGorGUxplcSFdZlnhL0OZosvchBHZBZspHS8iy2NDsXmjpyqp5BFBzhEVgFnBYspaFwyodFK10UmVtqBUH2txgjB0sBctovvon5hCtN7bElI2gbvH/CEUWDltPYcXUiQ3Il4FyI3sPhpE3fhPlZF2WaF9UABWCupCh23D4Quj0xn7yCbjKMxnDS1umsS2DGc7Hvqi+t5xHB1Hp6p49+sam3f24oWhRUJAOeAxvmokcdEqECZH5muBNFrhoPLz4AYyDG8EQiAklFnTheKul9eu7P6ajbbtsuL6+onOfV0Ql15dYcBUoZ4RQOFOaiSMfHiWfBy8JjF/SHwfLkFlXxevvRm2Oxln28cv6G/v6C+sQVncl1MQKRCyUMNAmqZ4c8Ws+Ra/XhPGn05q/oTQWuqJhQOpswUgsoS4yNWU9kAtnewMbhngziAUgR1OJ1lOfYB+j2MDaREPdE/SS2YMLH3tSdiTsCOxds725RRqR+2xhaN0BZ6CWZOqsS59vaZNknmcefwKFyg+b1OZcKQ7unBS00Lx0eTrIJPah6INRTWyzyAj70+pFarGyG5pjLZrpJqzRiVyTl8ZVsf+Z0OuQzoqUCWTDz7hl9kz/yZkTiQnv5oXOk/ZACLzwulGAkv1+T8ua7OsCy6vri4C2SZ7S+bfBCNsWE/JH1VFVdY+qkNFJSMuOxk/WrGcCSSSvUCjTmfOh5MtCkelw7MtNmMukLag1HVev2FT947YGMwj6tF71k5CRaP770jB5uTemADrgut+Dfl+fRjrRoBLBp37UjxaA41+cZr5cmFD6XJhb1O7NG5w7z1CQGV7J73Zm2qHS8NM/T5MQ+SOKxzUsbP+tB+Dz8EekeGOppVZ0CeUEsHShKrZWgDPyoEV854vrXIch0Mz2cwKsM7lt7aAjExz+SiDByFGwDDGNSxX13J7zUGDy3Xl2JFLm4MHl4q2LB6PGcZ2oB9sTFaP7IHi2f2ScNNQZsq3bcPu12XbD9chDGHlOrUS18XrtIr7C+dEHW/vsO0AdjaMy7A8T/GMEACaGoyjpVEUdFRqOaA04XRxNRnffwjoPIJX5TBOdQ8/4f1g0dJJmRRU9ZE+ApSh0z6YwdSNvXnN1DPgCOAiu4l1HWtiMh/5Q6msSfFYerKPpSqkc82GnQmGcCKRvp7HMXwvKNAHiiqp7bVgaW3aSrcp77qREuOC4jq4zURaUYWo2yOJeJ81/oD4Wvydn9g52xv29ZBJmTPBHPCvqhgqKM4QEhHKfPgNLOoTLYeeIjLMKET8amM27Y2I0y30zzyKTSjFIknJx5n+GxBA7AtIJH7RPQ6XXvJiZTRJesNjWxsi8kuVcHcCNty4d7LRJOCek3HP07Pz8oj6nYJjnD0jBTA6aeKAocZF0A6JwYYZgU7qd/SVAYXwoMWHSsJ7UaydMKCdfs5nDDwsfoQlomGLWoYL2EYPkGpqzQXQWHKbCWpj07CZJkVv2ReIGLRVvq/3bBE25Yh77Z3fe4ZnTv/NVMLPKWotYwzP6i3nk00V+jgaSWOQOH8tSR9WAYppGjgDUpVchP+uYJYgAbP4Z0yICRnQlFKcXCFOMfc94HWkiys/XD54wvrqiuVpxcVnZC3XdY69kSllddzZjLy9ffFm546otbRl8cbmylqjKl5urM1tO4c1qt+v5lODRx9EBdrBRnFV3F7ufO/bhnIMMlWHeXY0o/i4Dhw+O6/T2dCGcw2KdPzG0glJ/lGRyinTFttt1qBjKjMQzfsA1LC4wY7g4jw9u3mgEcFoICUPWc7Dv+c3Ev85Oa9iLhE2nLhlsdcji0SiQgKZLSLCRmDr3H/MlgqkGszqg414zHOQdieuk0SgHH8VtsBbbZpNXc0YApkNx/PMGDz5Gv1KHp9pJ3WoonGXwpROCkNQhMq8UNYn6JSUZAR3SDoiw/Bb4zhMdFh7EoAOOsPdmXLdAFHa8WyUtemoMml5p9M6MPFMzX1/lFJIKa6cpLosFcsSjXhsxgPcN4X8fR8cT9AHbD/oqNyJZL+DnXoebC6P6McySGZ6OuBZToHsoOMaHSTWeSeYDlKmvR7HpiIHNMJR+ZkavO8sk7GZPeGU9YXDCieVIyji2ggzQejAOEiZN9Ap8L2GZ1wyxyn49SVrjJBpZHCqimVthGpqAay5VBWzPd0P6LG7WnnM0umYQ4pjozorUt1BdXXyR4wlcN5FnAui3BjU3uLNzdGn5eLGRiiGTGqvs+AMx4hTqD1aNiRpJ6PxMK5e97KTIZDKcSN1bakGcf2G1xTava4cOxJqEG5P1QcKHrcNm4vgvnz4FmM/cNyPhH+aDx9FKT7cTvF8u+O+77jddxzHAXNoaW0Nl2VB3w6ef+XfHGPg5YWUfdsPHwUjKFITmi9SHLpkoJBIgaMDJLBawtwRJOYJSTAckTXOd41oOqnUrCuT8AAAKigamS9/XmNN6yRPNDizMCBdnGgS6rhkBJ6xyU+ICQCUWkBZpMrgpbLFJALhmsHKhP3CGYZArIlMNfPIcgxeJI02GORxRMgjvg5jP2avoR+bmO8zKWRWJvoymYdzfMm0QPEZXzNZpP+bHqGqG/p7nTxQ0ss7o6cixeVqBgfZRePoONVA4BePeh/5PUdRYM5qUhbKwwkYLKEmex+FBnByUB5Bw29a4NulTJmToGhG5EWoZtZwYgKp7gcQsjBOyY06wcwO+fkJ+cQ//KDMyL4zGy5u6b1mOihzUwXNVS
eqDRTQSUW0DK9SKQS10JGykB2LEqdF+44zj4dNh5r9YZicSmZQA2Y+NTaZYnQ6RdX7h9hLVhAGnv+Rw+Gn0WGmGP1ArYJxXCAw6NEoMGzqxAknTfQB60dCkKZ+PKV6vUfcSRkbytM5zUmr8SCzzOHcEiPd/dlqWDB4hEQGZuF9VAy/gdPgDs8u9Zwxe5Ovwai5LMH0igZaOq56oSxRuy54+oY37qReef2Jzbqx9kan9NB+I/17+/gZtw/fYn+54+WjtxzUuPc05nVpQCHdeXeH8/a+YdsP3HzOEEDUwFr0pGlma8cYOIbicDWJGA8BYc8WHOILNqSYpYQXFWPgu3Aa2vzqvxcvupTGniJplA2aWUI4kpkxiKc15wbk6u+ltSRMtgjJSOas0OJizLPOk0seUa+OKjfg9R8TwEpmpyiE3FBIMNFWMuCL4wsoP2fVmfFTRRjAxPRb72dgEFdgVTLDfKyhzz0afjngPMXUG83fFwHq1MkMwzOd3LwZ8dt3TNTP+/hMO6noI+GG9X4FtakKDMIUkeJQ0YFRURbyI6KXKMrz8jGykGTjxefoybA66PLecZ0ljfiDWY+YUVrAP5KGLKKOgHKy816pKdfdgIaTwiDWHJNzz7hjqFnET/IozVKyLJTWi7K/o6jAoKiDlHRuErprw6BLMhZbUTtQO2Q0x87JupvZYlyLx8Uo71y/+ZqYHhzX1DeQbz4dhqE++mN0ZlAeJYbDn1BkvKlABo138THbx32HgFp/YgrUiirM5LQfsNE9Kx0TioxeKXqoaWiithYkEJs1zPM9z3sckG45R+ie8Zi50TpF2+cMFJMAQ7p1/DtZ2Hyf85qLupc35rYrRXWXpxWX1xxCuL66cqSGSxwBmHDsGOjbgeO2Y/d+pf2Fz5B0Cir1GMogJZ2UYt92No4eHEYoAKRWdFf+r4h6DRX3x1DYzoBCoshSbN7XvL/uMMSyX+xhjQsS6kzI3dPZYDXmaJ+lIvQR0xk9eDg8QHGnj8g11wrvnIYmkgHwvsZY/5Lvd7IY7j+sTFIVkQk/Tk9hRCXbaOYel5Pj830yIqA97zdktia+MUsp+XnzullWEuLk1aHLYEHO0UI2jy3Wn8k7TurxkQ7K5u/kU67rpz0+004qLjBHsrtOFMJJMePpYHFT+4AcbPyzaP7MSN1SqwxOYy9A6k7Rx52Zf+8fSxjGqDupPPxmpsgzVkuceiJj5kQDxb4dkGNAxRsc+0C/79TY2js73D0DmcoVcMxceA1AivbAFL5Fvt77mPwqxjG0zk23FMG1CpoYpJDbWMAGaamKgurPgrKsPG9raUQ/7XEKqvw6PkKTRWIjgbCeCftmzLD3jq7spxKv3cTYimjKDmeVuoh+3aVXZtL7AYGgbys39XUlvFq878Rmfa8Y4SmFojroGcVvMzmpb+t0JHauqTkppvB6XmrB0kiOae0ddqlfiwikUnUhFNGH5jVqp+w9IJVqPKaAIaV4bcQd1PrqNDrj1RXLqwuun3NV8tdX1LXmyPsYa3/cNhwvG15+9mPsb294+bmPcf/4mU22L7ccaSKe7dSuCZ/txhENx7aTeu4Nz1x7xQMAAwZridUj+GLAooSiRSj902o9ianOvrYCkNFr/ErKvms/Jq5nuadRBcUbjmMiMwdXet9cafw7o9yauQSWBt1bZwNxyDEVI/uu5d6LjC4ck/iaiDUdTsDXiog3i4Oal40pmhTCsuI1plRl8bpx1HzFd1QGS31M5X6vlyKO1a9XBNbRNzlI7+VsKgN6BEMASUziDFG4UoSx3yoGZELi54+b2/w8p5r///vHZ9pJycnwJ04NzBsB+MaP0QcKG4Ihw2e4IOWQCqLuyMhQjTTWYZT4qScLWwRURBDLTvZHef9TNoZH+CeHyvmNFV+AEdmMMSC9EHYSNsD2wweSbQfrTn24vtd0cvNzEd4vezfOT41IJiIsGELetoBUezNunh7Qj//OBCeBWdbBJLTnREGmxYxE0/HifQcVG2GKe4ajpREeeW3oBPoYzKTGmFR/V+fOyA5xTqf7D/iG5XUdvaMfhf1ZDtfUKu4g7Xz5UIpR9jGyoxIKIjPTnfp4c9R4k7kWFyfFtOpwbnnMBiLSZ8bswYg/j2MkqSXUEsAEFy2x/mkQ4BD0QzaVGUNDTUFY73eSiT7ADSCp+AdHs983HC93HA75cXDjkdOGVdV7X+yhBsvrErB4rHk6UlXD8PVWw9j69SiAQ8Zc12fnNDHk09oSQYEHnt6QL+qvG5IZilReh7pM2n27rJwKfF0JI/p8JFMDfBw6666xVUItJYKJ+FmwDvOFc+c/fvF2u9OaSZiRe6v4XogMOCDJPIb4HGCunygHdPVr5I3Ofk1NToQlv94scfB/otzvtI5OKfesKvek4qEWGHBoiXBbTseDyJqMdiSChgeb8P6/v9zjM+2kUv03jDIwz9xtn5kC6jdh8EYcZujugCLFJ3HBGU2QLOBXG7CTzt2cxRRyJ3Q4nDDrxcMwPHEccFdg7uDASByFzi7qCzqUi8wjFgWI07vRUldvr2pYSmEdxutA2b8BpJOcjsrhCHN3ZJobxXxIGdzILkbjLyhYBpy5IwGRp5NFH5A2GXEmFRPvmA7qfFjIDcAI9IEan5DEhEgS7nOCQvdJvwUzw/WYLj9T5B0H6P/WQdWLfhwoRTCOA6N535JUGglvrkUJCAOAn1asLfXoOI1N0KJt9phEuxjAjLSGo3LiRH5ORMgenIwxXNmjY+8Dm0fyY2gSQyCks5dSElk6u/90orw4hIlccaOuLQVfY79EdB40exvds6g79ucbn2/5NQVee8++OgN1MnNYps3rHs3uBYCGIQw81DTh2lLm6IloIH2YpOuOPUKqQCAAUAHGZYVKKzyPUQiDe58R3OjHNWhPK9Yrx4esPnhRasssRAXALt48O5I6H4zSrIWdHM50UNP2xK3x8PidfknuIykAhmszRrYmkoFFuXCaQ46Iz89Crj9ThVZmOMODXxuzr9DEcs3ymELKKY6XgztVQuEEs0dLaLdqMagUmJ0YtFLcUcXm43vz5CIs9WD8U7yUnP795R6faSfVCovRbBYDzqrApXqN58QyG8bi9n30nDqLUlCk4LIuuTmad9XzT0OUtfhN9KEaRgcFgAtAkOoNnGfjfVaY0WQR1w8Td2wA1DjozACU7r/3JknCXJzn1BP2oQhkeNEms7ciHG5xVmNRo6Cow2BRKJ0jDQitxH5KUMuYFB2OgRcDijljB0pmkHAyr5jAlo0OsBSIuOu2U2SNU/YRkFbUcUJVPowATyPtfMziWoX0/CFzLtalVay1YHVCwgkAgZlDGWBUmfqCdfZ8ZEYT+m+F8IZpQV0MpShGMYgWBipWkraXnf/OSmwC9iYVQzPLgnurPmW1xWdLRqkJ7SnloY4xKPW1d0po9Zg0S6dTBNBSskAeWfSZSBmMwJOrymxp9AHZDpgBfWhSq0mskCSqjPuO+8cv7qCYRakrvOs4EXTODzeYUpxxp8bxOMba0/B7Ozzb49YL8dm5dmsEnVGrA5wcRaFheGNoqUIpr
oBOPUPQ4TJC++EjULrXooDWCmoTLGvBcm1Yn1ZcPnjljnshCaYPoBb0bQeg6JtAq2CIOQTo4aeFQHJe7rnYHzAE+JyrObriYRjqoMOoVpzo01FGTQKQNDakJ7ECls7KFKxRqwLWoa2gwKCjZIZtIkCd9UyDMWg3Qtm0TwUojkZ4CZYNw9yTXbw9QgVNGUistaJKQZXKyb14P5vKJfKlnNHXg5OaCr1ATL6tuYDdCAUW5veXG4TQSvfem1IMTStrGZ4NFQBWOLEXRiqvghTOCBDCHpg7ppkyT0cVfSwBrRimAQ4YRB0yoKoz6y3DM72zrh2bV73uUmaazqdk8MJivZ36cpDYMov04vKDM82P9RLKGoozMWVe8wJGw2WwsK6FdG0RgfZ1qpGHQ/qUlTidVRAOwkFFhMkbJjJnHLVSnD4bJITCxucaUXg0DM73D/jVIOmgopE2CBYPR+fenuK1rHEUM0AK1G9yGP24MAnfAmiF15S1fnqO5o6plrkm4zxJjBmZKcZo9NS6izVxYqCez0tOSzsgygxogYy20xl6c6caIGNg0tX5xwLe07HtDu9RiiimBZ8DiXj/2AvTSJu3QWBmUqffhRFErkHMNZInIee3I5xWfJ8E1OQwJrNDIiDhpMrBwE07swN4P1+tnNrDifKC0gRt5dDLdllROtVqeu+EuQ//KgbYgB0AVLJ0gJhgfMbzzw4qsiibzf3vOimDQQb/htl0TXkw0dgPMskdBl7YzBJdYLpMUQIRd06O1MDXo8GyBqYudybmBJsSpQCuA8g8Thgbl3Ptun1EgcuX0WZEUJ/nj5lYzZr8fM3XRSZ1WSouS00DkA2xHllHRgVws44u0DIg2hMWUWW77tDK/iArvqgLlSyskBas5lmSzXTZ4Lp8WQcmXR18dp2LUcxSkdl8IZjSEQ5fcH0Iqosu0klMWaEo0BcxwlOw99WwwRtfnb2hBcwUAY/E6Y5YdwIJJAZkIxDMi7+S0VScU6h2xygCNeW1GAOHVIxloFqhCkUlmTx6N+I9NTKozKR00srtVEtxtKCB90/B5s8g2VUfO35ZW/YdxYwaYMovjWBuAjnk8HJZqE/XxIN1Iw6MMCxuiKXQ4TZAtNAmDXesY44yPytjFJEkQoQuX0T5mTFkJkkH1V2zsPeOY7DfKgrfcDkqYMJmQWsOZ3RuWXjc9C7uOtiEO3yAY1DSgenRxLPxEsfXOf+qbzuHDO4H9FCe/4h6DHKdxnwx6ufNpvNiVIEoBldzwPuOLW66BwihQTA8GwM47qWWAjSQNl4K6tOKxYkfy4XKFYBBe0ffdhwvN4x9R7/fvI1goFTWGQsGRAZKNdS1Yrk2LK+v9KFqKGtD3w+0pxXjziGLx9sXfn029PvA2OnQ3e+e7khcfsmAJhwVxQemfmdIEvFzaa/yniyNfZ+VNPm6WK6lcr7P5xsv52Owh5fwCAn9iTgy4gxENUWIvaop0JE9obRvitidw22smVG2rLKGxjinPiSUyTA+OaiIpAS+r76Cx2faSbVKuZjqkF9rbJBkD4pH143yKQYDekcZA70CclTI0ZPBVzP7mt6+iCsByEnB1yMSLkC/8TbVqcNhJfTun35mfE1DY0kRtfmGjBSFOHutSLaaOuQTWcOXikQEs7huTmVVK2kXWCubMJKieNfTI3z0HhnBHOeGbz6n+/feISgY9UAxYbNzYdNsTBQO9mSSOc77SeZnAEA2AUbNLaBKP7jmkfOytKxlhOE2WCqBS6hGA1SOr+WkiK0Q7cCw7FZRmVvM5irw8eSYAcMYnlV44RwzcwJkNj168JTZQUBdHnRElkwnRdgvRIhTRQBer4jjLqGFhgfnFJdTgEj3sn+u4+Ba98wAkOT4II+dxoc4qXKK8MGheDG7KVsxnDFmAEQnQZEhfRw/X8C1ONe92SQM1NO+MndQSSB6iLwogmpCJxWzkkLKableUBeqyWjvGPcFrQnG3nBUirXa6DSOlYGPeIGVE50p2mxSEl5uB2WexnXB2Ha0KujbgiKKUoBeeKzaFdbnZj8f9lzbQMAcmcHbCWPI+zWgnYzWcqdCBwSoS0uJL4mapjm07VD58HaVYPVxXtvIexBZ0Dk4YJBDe2L+nioRVHpA7hkVIuA08/qVQIUz4ji5eBKPIluH28W4DDnqw/dH+VqNj/+/6bG2irW11EBbFtJ7W6su0FrQfIwDBKjHgTYGtBbUo6McB/pB5YQzZHQ21EUiGgij4LAC4gbMzTvsBPMhnMskAqSDkhkZR/TP//rnBFYPdzTgZ8eE2Ky9nbzUGVgLGnzzf7D+VRwGYJQKry8TjnQH6s6Udb0ZpDFll4yUAWQ2AKgTOgSQnU5qAWrzzCazKZmOScIBIDeN5O6ZL6nFRWKLQJaWSgqhYN5aS5g3P8tswiUy35MQcPGpxUYFDTXABnTEXfBNJgJaL16EUIRKCSSn+hJ+nb0tsUaKY0pZDz1BvaZTAb737k4qxGip1BCNwnm5CkcoPPbURcn60yOV6HMaYMZ2bqymzTkFWidoMq9jpyzU2Hl8dKyRESADs1KixhR1uPm+EY2QiGR5LumkolfM18aw2QzNeqKfYfFifWXQWS8Ny6sL1tdXXD/3mhR7H8mjB/UX96VgbBtqNaqIHDth5SLMqNxR0WkVkhQqm5HL0mBdsVzooMZ+YF8K+u3uWZgTCgZnrRnUpzpLxlsikZl7IGG+r23w3ydnFTeF9TT+7V4ExQce0klRuV9KSVXy7OkyyzV5zvItFefPQdS0GMXXZvOASsxbZ7yOMZxwEQE4YAnXskfNnxF8qfq+nmtA7XTPK8sm0qoT0JZPXbvvPj7bTmpZsF58RHIrWGPUwGXBel2mQKtb22BQPd13CoLuOw4fSocxUA0usSMRFCJCjsBqw4BmPcoiO7Fp5PwRTMCY0rkWweJ4bjm9NhZRqJ7XtUFqo90fhtI7DUXvKcc0axzIxRebIz/bgWMy/xRVBeKF2oIwYshGPQE755vwmKs77GRcRcblo0kgdD7DCtXU+wCkQ4ywZTltABgNQY4+KALTAukFpjSEmfU4PFZrQbswgmzXi2sacsNGMMHDiNTVXB+uuBCw3xPvq4rsSTqdEKHduXkpeuthRSGwCVGESnzfPavwiaZn3IvGtEydN88EZz9KMPgUh4+tOI7JWNy6ohubYPupFiAR0EhJbceA+M5QT66mOCaNbFLzOkRjtCmdQNbuYp3WOuuAbmGGw5Fj+LENw36CuWINZp3vpGzNIMRFRp0wElJCIiSThFOP0SSHIQPAYEuGRNj6mv1d19dXvPoFH+Dy+oqnb3yN9enqTgrMHvcDx5sVuu3Yny8Yxw7dd3TPrGWpqNcLlidChfWyoF0WlBbztFbvO7pSHuvo2J8W9PtG+vp1xf58g7SGY+vAy46xK6RTv/G8/8N2kHxkqKMwqO2DN0Jdo9L3nw1F93llUgqObfNALRypB2c+NZez23ztHt0bsHdqezotVTyDQ64b0IDZ/NwIItQSV4CBhKCadacZuC/BxATyvpr3ZpoxC4sQvcBrn+tCpum6cG/vnx5g
vfv4TDup4hNq69LQlorlumJZFyxPC9YnQgDLZaFRdCelQ1EuC479wLqv2O87m+AOjveWYRCfIySqmaYDpyTgvZz+MUgJckM8KpjVVJmS+PHbQAIkUi1feJL4s6LEmGWd0vqz3+bdWJoZT3QOlRPN3QQ51EzFHox8vm860QnV0FC6k5agAM+sg/UmoQLLoHe3oj7qvWQjYZwTIkPzjmdlPT9rEAKPbhvldlrjoL1gYeXICjNn2MHN2tx07OuSZDjldcqowlPIwk8M/TPVyBKn8Q8npZlBfYqDSp8xoT7xz4dFHc6nM6eMkjP71HIYZo/jONW6Eh7BzELOkbH5MZy16ib9XhAsLZI0aFC6zvEpdFICay5G6sV4JDTp7DQ//p7O3IMbj6jPgVyEdeIXiM2fvi4j6DkV+2dPX5wN2NtUJQf4rU8r1lcXPl97Y7IraJSFWbXpIMtNDGNtkGLQY8HYF1SX2EIpqJcV9XKhwfQgN+bMRbXfWoH2Am0VpopSCxuTXdr+2DogB0Y3mNe5oWPuxyRvlSSQlMrrOQTs5RJFjBgRiDscxTgMUgZTmxidc3ROd2gU5h3uqAABvMaZE6ZTLcX3t6Mqkd1Lrp0IatwGGGtUocQfkL8mosO7EwKyLHfEIcTalXRSPCVv+K0VsjSUiycQ5YFx8iUfn2kn1dYFy3WhivNlwfUbOOp6fX3B5TUhgOW6Jg4aGy6mf46jY79v0GPgeLlzsNp2YLzslH2xA3BmlZw802xQnAaxYjZylhJGnTd59Z83dxIRfUSUnZuUADHQKqRxyJoMybpV8c5SMTqpiGSyMBnfIzIsoSilmQN57PFY/AS6kbgw1DKSWkywmJBeioAlow7CUeNRC5kPcaV4uDTLwJDuxe7TSOp19WsSkMRgv9IIssCM7IO9dXl9RVsXPL15zYh6aemcxrZDD6U2n/Z0WJjtMQk7RI9XqhAAjPGkzhemBJW6ESe2oU51tD4HzkEnvJqTn2P0SxATgDTuvQ8cnZN6j6O7oyLEN7wdopvhSJZn1BX9MzyIKQkjnmqkp4ye06AV6JKQU6iqDGeJDjVmc8o1EJn8xY3e0pobI3ECiqGr9+wNZn1xmasxY6fRMpfz4fE2eB1NovY0UQMRUPNN2NsTgURCUFVwWRuWpeKDD55wua744Btf4/U3vMHl9RWvf8HnsDxdcHnzhOXiU4FLSdhMn1ZmFW+uKRSco2IADjxtDevrV2T3rSRfSK2ZDaY8lCpKqxjHhU3RlxXL8yuYNOwvG1QqTO5QHJBTJlVd4LisywzmAurdOQZmHJThmk3CkfEykzkTisSnXnPgKKcpV9czLACJHEriS2JtOMF9vlbDScX4D8BSCkyUIz2qB2AmAnX4egZ7Hn44jGPwUocFe5FtA/B7vBSwlvh0QYuRMEuFHV8HNanSCqOs6+KjBp7Y+/D6ivX1yij8smQ0E5TnxRlU2geWbYUeHcdlQb9t6PcdOwRj7zRaZrOLPVUJTnUmzML2EjRYA4fB2YTQqjup+NmwyfpJx6Pm2ZxCiDW6ptnwKbtnOi98sXlUGl4jHJUFxTWwccuMSZS9WOLRlqimgSjGKLr4MUUvUVBgz0rbYQRT6DTrVUg1AnWGkpTCQXtCGIfF7IpegDqKj9SI8FwgleNK1ic6qfXV1ecbVReA7dBNYdqhY4e6yjYbtzMHdGNeMtOJgZe1NRSfgYRSaNyLQvSAmbhhhw8/9JMa4Q1ODcf+/lOF/ATBAJmhDa83RSYV7MZo7HxwNHHPTl/TKeX1f8yfUwFDwrjYFF3136UorjsbZkUj109xg1PG8H4lzONDwNpzYrCBScfMhk7HGkasTCc1S3QziLLyuK6Kq0O0lUzMy9rw+hte4Xq94NU3vMb1c69w8Sbcdll8rhnrz7EHgllpOhicKkkEKYEG8PNaQ7tcEuYLWG0GfYXXsYiPYZc5DFMK1vsBSMWxdfSD2nk5eVioDF9bQ3u6ODoy6eX7bcPYCroIxg6va4286RGwqU0iDQoHM6oa2qhQn7IcPZeWWK4+rFNg3hsn+E5EI/LXCMqyrWLufcLX3CeZHcc6cARFPcDqOoeuorDvs5UCtIJ6oZ1e31yxrA26fx2w++riUF84qddXrK/cWb0iBFCXlqyS7M8Z0YeilHo5Bo514XO5A0NxFIFuh88ZQhpkwBzOtTTsdECOz5aC7OaOYELCkQHZuQ5krcIM6TigygFl/CRSVl1pu2gcATKKeYB/cIqY4iKdDeqDNbTpoFQzci6mnF1jCkE9KUCEo2LkJq3mYk39vclkd4NJjn0YVbLyqO6hXosSUegoqEOnQS5zKvH6ik5qecXx5a0WDNnZVC0KswPad4y+e3U3jhEATqrZHjnG+5ZlQV0XbsBSeH1lsCgyhisNWOqfRSDBTzhBfMFWCpjW70U6KCD7Y86OahIXbP77nfX9+P3jvU4ixAniQ8CfKLz28ZJ5Cgk5ZgY3eO/pSEicCK22qZwxo+o48ROukMd+jq6RkCdZXTOjQq5TK8WbTecwRriO4HpdcH1acbmsePMNr7E+rXj1ude4vnnC+nTB+rQS+o1sJQYGYt4TM/4uRaTHOK0xh9BaI1kiFOOdsJQ4pddugpgR6BhKxeV+wCBotx31Tikl2an2XoQTodvasLx+Yq251QyORQTdYVUbzExFZ50w4edTdu8Lztl7DVYVqJq1b3gQCpvr4dEY+J0sp9ObGxZTQSXuJoOieW3mfuJrnc2sig5gN6A7MhPBa5IyWkW9LmhPDtWuC3R9DLS+1OMz7aRKK+xzcKyaA9wuuLyhwnOMyI7IjRkFEq83NbQrR4sv1wXHZcVxWQFV1FpPTopSKY+ts5P80ARAKVAYWpm1AQCE6QRu5Kbaw67GGTquvCAiOAC0wUyvVt6a6BcJUUuIEBJE1ABOsU0uVr4udAGdN3yyVGM+HzIDL3NGA5dq9lJFQ29BaKnVjO6LUmVBQWxd4UPqbLIZ65mVtwisg2wo8ehWkfOI2GvFzbFcWFu8XC9JvdbdINqhxwbd79D9Bj043LLUlUfp9T3nGqe8TlsZvLTrheKijeM3MAbG/aDeG9iUa0HZtLybdFBCI0T5JI++azTFRiRuWVBOSRyday+uOWtnOOH/RrFce2w3qE7HL0mcwIkZ9pi1QfU9hxIN5qQOy/wezDn1tLr9LU/rytmzTEFwnvqbQwHDgEkok1ASKmBuOqkoAUrCl+a1SiyEuOvT6iPsr7i+vuByXfHmm15juax0UD5RmGPd4+R9jlOqV+QmgGORDDBKndCXB07i43uC9DIjOr41HZMADdlOsAj/7jgGrFYcO1sIINF3SMe8vn7Ccl3x9A1vWE9dW9L6S6vYHD0YnftQT2gI8LiGhjkpCFPSLILMUJSRedanyl7c1Fgr3GusIXom7J53slsLRAy1VqBWlHV1aLE5IkUVDXQf+glnqOpgZigRwPOmBzNxva5Eul5dsVwX6I6v6PGZdlJSkBFYCX2yxcU025RMCQhsrj2bzkq8l0WVNYehlEhZ+kk3jH8VkUqQLENpwES8yTWcoccntBz
++vhsfu6waO4zzggSccqomyr1aDNSfTs5IXtYgnluuQHjNefFGZ9+yqzOybYjM+f9mTAhi/izWJ7vEEbJA4BQh6D47onOn9lG9KgUTytpoMQAq0BdODiPTqqilIq2znoDT03dKCnM+19sdMAog+uIGB1GcR5lFJ6DahwEDB/UF0oGUsY0EO+mNXFtTgY2Ybcy60OZvZz/8J1b4Jcp3w8IJ8VVpf5hSWR555n9UfmeU70AvqY118jjfYvRJlKYNU0F+lOGdn5//3cxBhox0iZOKenFnoFE0BQIQxFMp3paX2dfQtvP94hhkK1VbyVp2Yg9syXvKfKLSBh7rmYBo3tE71B4cTl9ssTRPh7Nw78kLqfftFIgxcisWwxtXTBcpLZdV4xjoG6HBxjFWYP+ex82ycyv41gb+u7q89HzcloPpJfPo4lYU0C4XsWgojnR4d0TmKHrXICplek/4iU5B94emvg9g9foat6DhpypNhRaqNBR8ngBEUdlDNnTNZWBphpQjqr5Ch6faScFgEayFbQQ0AyZFO+TyhHR+JQNYjG51y+WF8WX6wLtne9TBVLM+xs89jSdRgbMqEoRd1buoPw/U/6H9S3FHKK4D8M2LIuMzSMUHYq1jmmYwEUQApzVs5QzC+9sAOYBILzigxp1QDvNa2gDlnDM+T2GxYY3Gn5FUuDp5NwgeeQPo1agwoOHMiWMqgcS0oT9EubZYCseKBQs1ys3w3rJ+k4tMxOGujbbRpXusW3QY4eNg8dTQJmbZYHUBZAFKdkS2n3O6GpPV9YZfNy52QGUjlDcjrAm2WhAbl4RzAF8J6jvfBfM7/353kQ9jlAXnH7spqHQsbQT9Mdzcu0/JzU8UNCDxIH46lRgpaacFfFM0gOL08yvasw8zs6v1YLzrKuc0RVXo1Y0MywnpGBCfH7m5gbS122MUE+BXIRhFF+X4sxSGskgH0WdN5MjM7YqHD7JYJ/9TdV7nGpb8l4N17TjNGtMFEJCr7DkpG7x6y929vx+rOGoPKOSCnDGp2B9ugIA+sYBkSKFQw8ddlierl6DeUonpQeHRR73DeMY2NvGTPLk+Odn0zGaTaJKLK5qlmssDvQhWMVX8ZC4j8j9bLVALgvKsmB5/USyxrJAhE7qOAakD8jR0fcD6MpBmF1RBnsQQ+OP0mVuC2ItndP2n+fxmXZSqSc2vLH03YwDyJsHeTTAEUmcvU2qKTiEQEisA6PzawzCs4hR5XSDPQM6pewB0QXhJ+GfyKJOT5igFIeFxMU/4dNqc8MT0583HO8tzDmP6Rw8Rm0gFLldAkUr1Apq0eyTCgM4ydxugDybghfnS8Aapwi5+kYriCiKEXH1J6NIOiko1eUtCBneSCktZhuVx7rLMNjo0OOg3M2+z2nEgBspjmAobUGpCxTU3CvqRsiL8qxLNa9HuGBr1JROhXzCh6daVGQEp4wj4MREBB9qTbEGkT1jUoo3U6sbIIO5oVaIzyaa0b54pt9Ce9CvTSniBXKF9XBOlnJaAwHrEIbiPeLxShE0Fx2NHjhBaGE+UsMTwvN7kZBdLOjTIwxPuKwMsOCOKSO3uE5udodgHD6CpZIyH3pyMKWw8F6hfUcpswWilEIjeuU9X9Y1j3fsFMM9djL6VA3s4hXWI72evXr/nQBANcLZhXBX2IS5oR4brKNFYrmuWK4XaFf0G4VtkfBjCQBjXrIoN4S1sbA6Z8jNr2IJ+SLwehlm+8lpLZ4DqQnTIrxz/AORIcbvxXseeb4ekDvDuD5dUNYF6+snr6+toJgB0LqiHgPt6MDesQxmkX3v6Acnhhf4qBowKEIf0L1j3A+IArpv+Eoen20npVPXTiN6zJs+bxaAhEH478f3iYjkXC/gHKPhT4eV0kmdgbFTYn1yiABy8KFATlkVpjqFTRFawDAU0GKJa9NwnCLd01fSw2UyceJ59sSCVD4PBlctBRpOqlZGaW4kgYk6pMP24y35lddnBIz5zucToQhor6SDiiZcOgAw7bFZDDav68QIcjkZBwt45yBttx/vKHJDUGpDaT7Ernkz9GD2p7CEVDhW3bM6/7e4IvjZMGfkbCXvxezxkoeLbb6ITn7qVADn/Q/pGVfq5Pmra5IIqCYPJBwX9y+OpbbijvjkpEQoUeOkGxXPxMFxNCFvI4X3v2IGPCVGfidMF/CczKDr7KjcgZkHSo9WN0567q24XielqbiZGRwyCVQitTDAOH7GVNMIsz9JMJYKPbx6JswrSimo64KxP6EuC+x6TRiz+9yr/X54PxqdlJSCuq4ceHhhS4SuC0orp+vNwOF04Hns6ahEch3Fe+kx0NaFvVUxqRg+1dbeXRfzGY2zc9+Gm2ewlz2Mfo2jDeHTiDSTtTffKi59ZGvpzAqDB5HAGwiTi48JaZcF5XLB5dUV7XLh9fKa1OFivO0YsP3A0hV12bHf6ahiqm9mxAbAndfYDogaupNMfr7HZ9pJ7feObd1xue0orWG/7U5fXVAcO60+K0g8s+H+mmSEaM7rtx39dsfxfMfx/IL+fEO/3TBuLxj3G/S40yAqIEIXUaSd9+VpmXlUcjrWeF06Jnh9IA0CfHHy7wokWYMxg6g51FMxC6Uex7/3eWEggkWVjtWQtNVzzYsNn0jsG4ipFFMVPSBD1ciqFFV8c5+iahHKIhVvsF5fXzkF9tUlThF2F9hRMtsUgN36opDjmK5/OHzVO8ZxQPcN++2Ovu84xnDrWlEuK8qyEiqsDZBK8WA2R/F9EYoQhG3kFGvEtQsqOdePMMs5X9Ozk3rn3qozFE8kR8KBcEmmWkEde2fhmWQQYdHoHI3c0QsV7+EMtuZU6FIKM8kxcACcKGyUQeoG7GY+pRYQU5QiWMvUpiuUyJ6EHK9hTTRhynGZO3hzWArRC4VgkxlSEdxRgYer6i8R3wAZ/LiT1qFsFi0A9oPBzW3D8rJgWRv25xeO2FgEsJ7Qe+z19fUrtMsFlzevksCx++yr2/MdvZNVSayuoj5duS6frnj9jW+wPq2AvsFyWWCXFU2a3+uSQetEbSYzGCEw7MxV9Um/A+J9eyRF9O1IHcVx8LiO+x19v2McPm1bB7K5PO4PuK9KqxiqaB6IF8wJENStnDXGtCV5/TMkmPZJ5utCS4LK/UoNU1eFWK8XtOsF6ytmUvWy+t4SrGbog0zRy0Gh5Pt9x/5yx3HfcXziNfY+OE26D+htRx+GTYGjVrzsN3wlj8+0k+KU1Y5jP9D2A3070JaGsbMzG/CmRI8AJaJync+xdy6ebUe/cxppPMe+0TCOAzGlE6coxnxapblSerz/Y3rtj4ho5J0fntP0iGIjFIrIVqZTktNbMEOfRiHNTEbi4QjDic2MCqUQ15bIlpzF52/pCUoarPjUDJjNYCrJhosI83zsEWnWdWGH/3XNDTSUg/y6M5sC9lB1/TLPEuENlcyiDqp5e7NjhIOCMOIVKToY29IIiWXE+Fg68qx5Zt9xi2TelNPPJswyrw2vRXbb41Q7iI9wxxdU/RJrR+ZnBhuxBOQZbDP/+9IIQ4V+WynibD3QAKjBCofXJfXd729BNP
yeGKKf8khnG2sgAgVfF+7H8voE7CfyeNLvJk9x/fKz7USxhmd/wi0Gz2pr9AkN8mFrFYwGwA5XaCCxaawLX3d0pIpFKdhvG/p+4P7JC/qhOLrChBN4697R9wu0D9SlwFSxXhaeUy3QelIkj4DWg9kIas1bQ7SP2TQb99nhUvrukEDi9R/RxNs7B4aq17ndTgk8MCqnrMc4Tyv66sQks97Q8Jtw3znqCooXM8NwVFP0GPzsc1AR9qbwvctJWUJsBtOU6TKIVfa61eIK6o0tHM5ctOHz52IPAxh+jcf2ldH7PtNOarvv2Jcd29s7BILt4xuDkUqIRtcB05aRZxok738J5zT2jv35BfvbF+zPL9g+/BjH8wuO52eM+w1j3wAbgE8CllaA2gBp0JMiQU4dTXfxaY4JCFjhzHw6O6TZQIu5aORUMEc4KNpgSIgg2XsGKPoaqOju2Vd8dWabuurARG7sZHT8/crsOqfvYPNrUc+yZPZTwcqM/i8LWoiBvrnScZpxsNy+Y5gXt3vAdyM1CgXwwXIGOzi3ahw7xujcEK0k9MWeGZ84ayCMObrDgZ6hefYkp/EtMP+MUz9UKuH6QfBenGCVkx0wuLiw90IpDCnrebqnvBcOp6rCZE5EFpSslVGnbsnifq6mcFI+nFNEMGqZKuXh0L2JVzFZfUmqUJtK5/lzOyVB5vAgzzEY3tHr5SPyPDMUtDIV/eFEEDNAxU7oH69SXLZYngYSiNSMk7L92pm/ph4dx85pBv1eSYopBtEDgoGCQRbg2tC3HcvlQkflwcr2csOxHXj74Vv0feDYB1QYxNTrE9ZXV1xeXzmJ+PUTagVsDA9mHG3wJu9UN+md04lDHivqgO5cAct9Fo/QfJSiKB0Y9zvGcaDf7xg7ST8QQ6mCJg0WNbGThNi5Z8rMgOF7zcKJnfb8g5OK4MLXgtfBrJya2xFRqpzewnU6faIBusJkQIWST1JB9MK1HtEqSveJen1QN7BVksW8F0yGQreDVP2uQBHs+x1fyeOrdlL/5t/8G/yFv/AX8BM/8RP44he/iH/0j/4Rfttv+20PJ/hpjz//5/88/tgf+2MAgF/yS34J/sf/+B8Pv/+RH/kR/Ik/8Se+qmMZfaDvB477jlorRR/FjZCxAU+Plrh+RnCddHPtA33boHvH/vYZ+/MLndPbZxy3O8a+QUfnwl1Iv6yXC6RdXAC2uTq2UU3dx09HBJWRNq9MRuHRga8FaCep2ep9QIAbhjAQYL+U+DRgJuhOh5cwuIR0TO1E1wYSa4l8SB4NReYJBhq4ONYy611nQdNMQiI6ci0/eBZJ2MG8SVNQlkoq7nXF8vrJo0Vuyr41RqT7jr4Z4QF1WOV8ow2AKFDhMEhDMbIyg71bGyFeOIwUM6tYipRZfwpYxKN0U6AfnbJFR8gGDVcC4VXLNV1m3YJvMetONLqTEBMXutXqfWI1r50Vd2NFZ6ZRyqmG55TrhdQYE6GTqpTEyaxbAJGC7hFqNUMF10ALMVm/f5NhZ3n+wFynanDlAvPqhDs2dySdSkts9PVpw1oLlkJJrZKDGb22akBPBRKvhxlZqfHgugYOnTXayNRg3vagiuEi+yZK9XoMNCisAiIK3RWjDJIvhkErRVaPbcdxv+HYBo6tZyZlCsKFOnBfKzA67k8LSUBOmol7EAHM8ULnsr/ciK704XUrNqKMna+LbL2kk/FIUt3RewYFb6CvtWC9LD7yIgSYJzU9IPkQpdXhrTKOBAWRaxoayy0TrS6ZrXouVdWSfBFBEqTA8SDWOQcwjgGTAbntkEMh+4AsjbXjpRH+LQXdgKED23bg2DuOo/uQVsu9bKAkFMxtmQjGceAreXzVTur5+Rnf8R3fgd//+38/vud7vue933/xi198+P6f/bN/hj/wB/4Avvd7v/fh53/6T/9p/ME/+Afz+w8++OCrPZRUpB57R290VrHRi1CY0kZg+JhaXJ5F2aHoGyG94/mG/vyC/vKCfr9Dt21qYIkkG6xeLyjrBVIXGCo7+I+RUjHjrDoN5MaNvRlTc4OqrSX6HzBp4JI+IGEkAR56X8LgzbK3nQLkkyU4/ztdEB/xOfDkIaPfE1xIx1ZOGd00IvbwEWFtI3Pxv28VZfXxCteVDLuw+a1y8qsYMDo1+IKwEscYHOQajrKilsU3fs3mWqollAf5H80OZG7CUGDP6DRkgvqgnqOrfcdMpyTRnLKhyJDnBRTPph573+LAHrLkCBY8qz8HdJEdlUL5KLIhm4e7ZTqptc0sW5nu1qWxjUAV1QOkppLRc6yTYjHX7HFtxBJRIyVePSDSOC+QiHGIN/8aKcVBGT8HRArluBoTHI4ZM4J/FFaGSL43G4st91Acb2RWGtmwt38wOPOdNRgkck8rpPLYY8hjPzgXq28dcCclUjDE0GE4XhYUMewvN7axLA3LuuRmMNeY7N72cLzc0DdnlhKjg9SF1QBXNSp+H3OzxToyzfcLeLxWgRQOCUVZJsvVFS5ESgZDZCxywnKMjdfu9uZEIU7FCniGb5RXS1vk7NzqWXH8z1CyDqsKaDdABvq987pWBZZBJ7WykdlqQQft3u5OqnuwFy0SEfKE8kco4esRIy6//OOrdlJf+MIX8IUvfOFL/v5bvuVbHr7/x//4H+M7v/M78ct+2S97+PkHH3zw3mu/1GPbNmzbpCt+/PHHAMB5QKqEgrYd+9s7U2ElPFS9wZd1G18gruPFgrwl1DS2O8b9Dts2yGCDWnXHBBG0pyfUdcXy+jWL862RUHAo9vsBe7nDokAKV4oOxqFygQJw0VDBWiQnlp7mwgLwKNQ7yWUIhilaKTBQD6/534pM6CYw8IC74mfwzw0HxIPwzEnDCPnEUNC4GP84s74ICNPYYTqqYCb6nyBK7xZedylUPb6uaK8v7PNxYzv2A8UU/VawF8WBDhVFhyUlvKwLDUuobPCCugfvXpxlsKJDs/7QB52FFIriBq4eOm5iO0anjt39ZcexD2w3Rt+xJkKZurpDjrmjQaV4YGkBs7UgLok5VV98bLk3G0vpsDNEk++F03o5QW8l5JyCFCSJDpSjASaofUFbO1of6K5mQvVydfFQY19LaDbCm4bNkFaEPtdXIx3LcDhuHyPHaJRaYLWgoKL5BONo6I71q+6szbNRyKltIk69xHWa2Wn14ZSXteHSCPctBW6IB2xUQIcjJtTfK231Z0uo8hRN0bGhJ/Gn6kAZB9AF4+UFhw3cr8sM3kpBW1e0VZ19SK29vm3YPnnxHqfDa40NdblASoPAywEVkGWyhc2iAX2kTmbzYa22cpK1FJJ/pDY/p5rZOAcZGickD6XD7J2U970noSPWTe/UHe17x650GNsxxXVjUKwAWEqQaNwVCOislJkU9QgNcZOtkIGprWFUwSgFu7Mn7/vBoHPvsH1nr5QqFpRsVkcqXYhLeP38j69pTep//a//hX/6T/8pfvRHf/S93/25P/fn8Gf+zJ/Bt33bt+F3/a7fhR/8wR9Ea59+OD/yIz+CH/7hH37v57kOAW62kDDaO0YtLmnEvguoQvsxnRRTBq9D0
OgJwEh2aZlmm9dh6uWKurq8/4VOilIrA1UB2Tukc6gafaBPvvX6TuDBgCcGRbyPBXCgLBP1oKgbQNkTFAdh/GbTxsxNlVeE1kZVZ5Ru8f7++0ifZCIEKbti4TDj+p4dldciHjIpy2OGIMVvZ5OrpKBrqH9Qw6wA2rhJrgtEO2xfgLVhgB3DhOeYuYrr7fEMLA2HHTt071CQbqyGHIPRvUlafMNVZVAzho9FECPrq6vPFhvYjwOHU9vRfcBkGAo/zfP50wDOfDmL12H4z4UYkdygkaHCg4wp0+XNyqMCZcB68b4VQJqeag/OQKwE8NpFKeLrdPpmNA5h3LRzzhDg0b7AB2hKRvrZOoC5Lh5GfhiSwRmqK5HNZ2YHMLPwoYDi7wkPnnA6d/F1wm4EA0B4rC5Umrg+rViXinWpqM7kGf2AHtzXxZ2l1DaNfAn1BqRTj4Go0XfoOqkPene0DZ1K6UenBl8ZKNVJEaG3acFM9AAU4ixDRXEPWIpfGRP29pl5Gwv7LkNFv9QKVF67svicpcuVmpieTcXNUG/wL0X8a8HoDToor6SulpMtOQIM6dADntkPHCmuG6iAZFDEGkPJ5Zq/cydiXb3fTnzKtqB3DnzsIri7FuTmqu7a2egbvZda8iO4l852+yt4fE2d1I/+6I/igw8+eA8W/CN/5I/gV//qX43Pf/7z+Hf/7t/hh37oh/DFL34Rf/kv/+VPfZ8f+qEfwh/9o380v//444/xrd/6rWjF6dlwlKl7VrVXdPO6B4xNoH1noTKoo0K9sdqa/9szJ8ekdQzUy5UabiKoT3RS7RUzKsrpANI6hgrK3hnRS0WHYg8h0SgMmFETD55NlZlki9kJ+qDp35VF2GHs8B9GiKj6IjFv6AXwYBzJOrRZ5McDQDUNp4EO1ZAjIgK/pqr3pKnHJOB3YSKNr/C3FDfqhTCfLHXKVUUz7xLNuoC1AutXFGFBvNiBsRQsvTOa9OtNKaM5xdPCoLzccLzccZhBbzsFU/vAdgwc3XAAkGJoKJA2vHH48HlUgt011263Hfs+cN+OHIIp5sLBpWABgxcT6tLVbCabQQeDd2YKrZK9ByfsJDvKg4RwBPEW5rASYMDBTTy0oqg6Q7LlBNTqYx8K4HI1rEEu6jOqAhoym+NANh89UzcakaNnxvZwMEhQbTokMPg4q0bw39Gv52NdPAtTKDNOMTRDGka+zhlpZl6fEienep9cKVheXbBcFrx+c8X1umJdCemqDhz33QWhOd6i+Dor7cIG7nahQ69AGxcoFO3S6Bx6zUxnWXxWmavyA6xPjqNj7If371BQtQRc59mZVM8ohHUbAWhPKverNAGU+zoVzDsbfNVHPAswyweuI1laQ326Tgflj5gGrUOha3OiRvyMxxxQ5/DhqPZ8x9iEtXbPgu971H/CNRuGVgrAZhCK+RoT6DDXBjw8UJPs6zzANofDDC9j4FDDdlKaj6GpqIWklFoAHwaaR1HmJ365x9fUSf2tv/W38Lt/9+/G9Xp9+PnZ4Xz7t3871nXFH/pDfwg/8iM/gsvl8t77XC6XT/15a96FnxIuvrlUOVDMAOhwVhhp5YT6lM2MjQrpMY0zJoYPH+XRt+6wnQG1QaWyyb8riuO2vWvO22ERmAZ/U/YRmGuIUTHCUuKoZOrBaJqZDAuLUUrRwOkdthEj1CelJE846xuYUM0pRiTdlBcGQTtIVtk4LU6PnBhplWx8rd7fEwiKep9LODQ7ZRelFjcaZKjFM5lq53HGAI1tq2iXBRhP3Nid8EVZPsVJGa9Kv28pjaQA+hjYvQXhcAZRH4ZhghgvEIrTDSHkKTiOgaMrtvuB7Ri4bQen76r6/SoYRTHQUM1nhUFQE75wn19oqJcy4VBzZ129J635RSa1m4+sHUS/kK+DooPX8qjO+mtofaCuDSiCZVkoWVNdVX5dHjJj3p8ZWR93tmXsVQgV1QLFwQjZKJ4c9TuPsVl7kskGXYuw/8fvcysFizvx5lODYc4cA4f6iUSdVHLYZ4M4hXzWWcUMslRIa7i+uWJ9uuDNN75hNnVhzWj0geO2YXu549gOHPeA/wVaClRoBKUV1EWwaAdgWK9X7qnO/SOloq4hobZw4vO6YLmwwbd4oFp8rHwkwfWy8tqPzmVY23Rm2ePmKILXfDjB+cDY7yQ99O6ZZkF7WtJBLa/ZjNyul2yhGKe+LDoRnlfsm0kCm/JPx+EtOWY4YNCXO4Yw4D28D1IAVBXUMXskw2IEkQIeXMSCkIBlMQNu9XLGUEXvtJO9u54ikESlOR8vJmrDRQhsDnP9eR5fMyf1b//tv8V/+S//Bf/gH/yDn/e1v+7X/Tr03vHf//t/x6/4Fb/iK/6MKrOZLdQXAMwakMInVXbCQrtLlgwFmtO21WgsF7+IrRC7H4wg2DCpmcKzXUpTNk27QyrDh8M5Fh/TVqkGwJHrsFnfyflPxevfAnSd7iVrxWYYwsyvC51T9W57dUjylKfzEX4poyR4BONGB5ExAaW7oysn+Ea8T8KdVK0lf0dig/fgYEbe0Xsl6ahOckhBljDvOZHZOBx1hbougF1gg0uytMXp6080HEtLSCwlhwpHcY8xuFH8OXyM9zDCZVoKtA5oEYwyoalxKEZX39zc5If3HFURjMIqoCnJAKbMCAxADPerJ0NeT9cvbnSwvAoc9j3dpnfHMiDkkYwBBLMFzwIdxm1LZRYSRfXiUkkRvHh0OjDVWIoAo1WvzXB9FR9cCJ8yS+KMpfOIemeQZUJaCXBIvBQfjBnyR5Lr0PI6GKJ7LhxeM49V1GDFkkwSWfbVlbJfvXnC9dUl+5dGH1NrrxQ69uh3hKu4h5Bwo36jmbJHr7M+zf3nE3gXrrm6rj6Xan0UsvU+tWTBrY0wab+gDvYdqUX9N3rzcpsxYDRXq+lHQolSHJ6U2TTbLhw70i7rqQoRvU3IRRPOkNfBP8cHuY4+INsB2Q+U+w45OqwWJ7E4mcedVLSPzDqouSeanxXtGbSnmPbF7x36SHhaj54BEVmYRHoMyB4qSi15T1dAx///zqT+5t/8m/g1v+bX4Du+4zt+3tf+5E/+JEop+OZv/uav6jNiTlMBRzBDB2wIoAXWeUfM57dYVx+Wx5RbI2OAw1srO6rr2lhYPgYO22C3DdoJAZh1AD0jJjOm48c+sN/u2D0SPzrhvqNH812Kfif0E9AHDR4ZOCbCOopxSmv28YCQnAzWvEgZVcBKFqOB2aQXm4zq4WHAJDcSm/uYccpodMrLcFYbXOOvYolJrbWcjmX4nKhwUpbUbkqpuBz/0wXrdSW0AgA60O87pAgO8eDAoU4UQXm6ol4I6QXEVZfFi8k8lyA99NEhvZNJNgZrSvc7M6n7QeUKNRSZs8RytIMrefOQnM3Xw8mxyKxm6BAUFRyFWXEpBU0VrRYsteLaKrNaodFmbYVMxJyT5E2dAnDNOWX//Aiq/GRfYQYZ5ESjlIJjO1CXChsDy3WFHk+4vDInUrggbK0oC893EckeuLa2FEzeLxvqbeFQzr2jQyAHlQHMC6FEAemgmkwHHE4oIMwGJPmH
2bSvvxYST8Fw8yASTmryLIGQmHGUx8qJt6/fXHF584Q33/ga19cUaC1FKHH0cslBpmokBvTt8MGk7nRLRVlWNHpyLE+viHg4z12EE2KXpyesr55w/eCNT4t9zVH01wuWV1d+jqt7iAisFaqYR6103WFv764iET1+ShYqqXHQccDGTlGAnXJeZblwPwi8j3DNbK4uPK/o4exHx37bMjBbLiuh88t0pgZzZYuOcqvAvaK+3CHbQThfSmbAsbSynKrGG2jIOraEbQA4407EZcr87z1wFjOUoahD0ZzJN1yai8IJHtQUh/WXCrTmzltJHOpfIyf19u1b/NRP/VR+/9/+23/DT/7kT+Lzn/88vu3bvg0Aa0b/8B/+Q/ylv/SX3vv7H/uxH8N/+A//Ad/5nd+JDz74AD/2Yz+GH/zBH8Tv+T2/B9/0Td/0VR2LQKlMbsWLksM5/t6rAzgjKKRMkKnsu8ysiNDgxehYSWaU6+lH9wwtggrfiKqMyF25QqPPx290ps0mZ+X9WS8SsscE7KcTd2rizLusaYEOIZhTllEc3smavOs+qPg+sIwjSyI58whKh2PgA1aHs8EsKbStVhe1dWhAjVRUeFTnjoaQ6VmnL2BYZgacoivorhIR0WFAhCnd37xdYJkCsGVt2cVfxnCFdaeTS9C/FaqOh49OEox6tmFANQroFktgw//v/4vrdgoczKJXjQZQjBCW+lppFvChBzrRsBXryJ3UjEDPN/8MzQVsOlUi4jWU4FJCWcIs9lgOiAiOQoKBAFAn+kh1Jo5hrgXhhAAtMmnKQqgTpeAYOtdUFMo9qwPMWYhOl4/15hcniukGZSTuWXs4q7ge4aTyjM2oF+kQoAnYrB7rwNdTSVFi4vB1JeRZj+YQsubpBn09goPUgAxiRW3pYOtKx7A8XVM8dT3p0wVEXSLyPxl5Bk8OCdfi2aoHfDBg4BQwn+2BzqzF70GQi7LZT+AtFEoa996xb3uy9lC43psu6QSyuTzmmJ3W1oTxTpkxMlZNSStTRhmxBh9d2jzmhAPzt6R0Vbe1DSReWQSfPucrYMHhzebh5IbO9/pyj6/aSf34j/84vvM7vzO/j/rS933f9+Hv/J2/AwD4+3//78PM8Dt/5+987+8vlwv+/t//+/hTf+pPYds2/NJf+kvxgz/4gw91qq/4oT4+Y5AsYL3mQh3DL7B6OhqNcN4ha0NhJX7mUWzxBU7FTyiELLE+cNwPNgEHzpdpsRc3/TPgmVqqF/jDJgiSi2mqSxQfQkaiRFPJWlj07YTRFIt+HGaRIY4QkXfU2Eql8CXHjVRXWZB0ymZs6gtNsMUj/XhQ4SKWORI6YlFcnGXI60Z1bkaay4V6a0tj3QajQ+93HH3H8eIGRQkvSBGslwunlz5dgGVBWVijCqXy1LEDcvDkg2QQPBsZrD2iU7wynA/rIJqF3OpGjPRhzxrrQNOKWgxaQSaZnVmPvC7Fr7sVweITjGO9nR0nSlLIco2YzySiYofLVZ2ChvicGBU+DQOdsplCO1UQ4FIzVQrnn7Wa0JwIHFpBZtCtLLBm2SBc1wY1oN73nKLKiHy4lbGZKavmenXfTtZaoYlSo4FTg4uMz8GCU1XeixpGx6bw9y+SW6k4tBb1IPHesHBUKIpmK9pGxYeyVMjgyGx19EF9jcS0X+gC1AXSVpSFYqa1VSyvX2F98xrXz73B0zd+gHZZcXn9lNlMjcAoak0WWS3H5IyhZIvWDRAGR7ofMB1ed+bxjO5DU91B8RFZJjKgCRklRTiojuO+Y7/vuL+9ZSuLmmG5KMplwRLowMmlpB6pzuBWgAy+guEY9UE4mgItaRdYBnjs4Qt+zdlFRe21Aljcx0KADkM38bKAoPeO46geqLCvTr0mefSvkSzSb/gNv2FGA1/i8f3f//34/u///k/93a/+1b8a//7f//uv9mM/9WHGlJrYt8IObppuml7fgnHiUB+dzIBJhY4Qgpz6cbMwkx/i2Kvjr32kEgN/zUY6GQPFjXgz3jguvFgoMUsIM5uIh9N1Q93cakkndQjbXLrTWWcYhFMG5ZnGGfZJ3L0lXOank4ZQlF39DacI/vECA8Z6m4gA6rBPUSx1OrzmtPLLdcWyNCytUoC2d+iLYr/xHIPtqGbJYtIPXmN9uqbkD5sjG9mBMRAuLpM4RCun5tyA0/xYoad+mFpQQEJDLWQpSqneg1XRFo5ZXwfPyaSijoGh5uxMbmIGMaRpq0zWVkbvmJs38wUzYJyyJI+4M0g4/c17a+1dJxURrhl0PzAA9CLolc6p1prv37Sd6oK877JUN/4taxqmQFsadMzPwXD5m8PhXIXDk/Kw7gRkwyrT1ey5gWeqpO3b1PabZhRJaXdckdG7+SIv2PcO2Q7c7zvQOBaiLlQmGTtJMSOupdK5Fl8X4UgYCMGVLwoUFaiL9002LK9fY31DRxWZ1PJ0TYcoNdQeSh73iaQ70ZihVOW/kZSl/UCxDvG9nooQQodSMEUBpNTTsbKXzQaD4f224fb2Bdttw9uP3lItQg2Xo2O5LOhmhNLXBbV62eHo2F/o2PrzBr3vwDFQDVjA/qZQU1+EcHB11Abek6mCvGfndXm2DGczEco57JXTGZcb2ztgwJADB8C+w1a9RMMgq29fB6M6ss3bc36yYYQ97wJuNIf7TGenN9zLc5VnHAnAjX6Bp+CnVBmIsJ2v9w2ZxjFwWmblTu0MiZnI6Odmnzd7Ri0RjUT+ovFDh/a8xj0Nhv9b3n1mBCs4z0my+GCD99xEr8yks89wic6XERkVIAqAUv0ElXg4BA7vcYpqMC4L6Lh171QjdxYQazCG4lp7MbBuGado0zORTx0HjvM9kQnf+X2FacJ6FSSsxIRg8Wwz3p9tBwVtMVihEjc6FadtsPFSu6AMpOrHw+HkupHTT5BwqkrAaNNBp07e6Xzw7vvafEXeC/O+Og+YyEDtkFJIyQ5D7ZkUxzpQJoritJKBihSgH1RVWC9L1lRGPTBCuNePIUg9DwK8wvUoRTy7BUwD/haYlROFPa6P+anZ+0/QicNrg7J3LPcd0ioMguoqLuqC0iMEdU/QaNZcZW5TVUo0GQolkRqH+HFUBwkTdV1d9zGkiCJLf9ynD5MT4msqm3cq1+w7RDu3RwWdnAHRzEzdyOqBkjvUCEw6jzdk3rbbls/uAdOAYekdaAW9K9bLQKs8YT3Ifjy2A7odHMI4lFJZbo8EzHwapuaiRFbruym20tnEfFpSEqYsiDXmwYn6/QyKsvUBFXH0x3mt4i03/etgVIeJx7JZjHaapWKyf5zSzIiYiGmRGc1FYF6aK023gmqEZNp1QdtWjKFo2wGt1NsL7nVESqFsDeE4hGJs6htSUgNuWiEasH7aYEV5HBxw6BCU0MYkZV2AwyFM+tBHxwRENFnmCgJcE8wwcGLzeO0hYRmhtlzUAuIRQpooLAarKoo6xAVJTD2KuOvasCyFCg8g9DnuG3tbuvcgGaPSer2gXlZO/CwV+uqaKs8PFjsDhHMGIqd/z//xN5rXpzmc1zyLoqZhdVyKTlUAYFmwquGiisM
NwtbZALnvHftxUO5KFdWj0CbnjR6HaAk/dg2JJGdDhkyMDoQQKwBmwGYoMmG1bCSPewR4b5FHKmUAwaqCoLeaa9HGwjXcI5suEFmBRuWPJgCWCntaqb/Xo3fHYPuBLiDD0U6acU5KiGse8JEqLZoVdZHZU83DPBtxxqJ59qFOZCJphc6mm2dVQ9E/ekbdDuxDsb69OTW88fiG4vARHNttI1MXyDlbpVJBeKhh3zuOrWPfqf03UNBKgzQ6prKu2UQbZJrM9xL2jiCN7Dn1sRv7bSMd/vmO/fmG7S31Pvv9Dhu77yehzmKtqHVljWxtKCsdpRSWJkYf2F6YURx7x/ay4fZ8w0c/+zHutw2ffPw2s8f2lrT169sbLpcV67pgDeZtpzzcODrub+/o+wHZdrSuEDWSfIzrdSk+7VlwYqSekuXcf58Wlp0RgBkUF2XAW8A+uGIGtTHRgV7IMM1YwqDH14GTKsUhodoS4oqx40TBDSMykdw9TLzOGcZ7TykoBvb5XFdGTHvHOCq0zBHRLNQXAB3Fo6sYLEg4jWl0kjOACf9kZElnG7TM6scblN+IgswLmwAbKSmlPxvwTrZ8Nvx1BTAynT9Hr9H4W6KXpyhi4F4K8ooX44VOX3W6AwRrrgizoSJY1gbf87weLoSp3gYw9sMjLaFiR9NPidJmRmen83t4POyU6dg1ahrGXoyocZwHxOUn+OaqAKeQghu2OtxWjo52UGmbnf48B9a4/B5AHjZ3BgZ26iEJaGp4pudBzXz9ibhRqJuGGLrnxQDBiYMQxx+G3XtpWF8tLullLmptgBWvs1U2TdcgAxiz++gJypqQs0SdYj37OizrRw9PR7RMSWJQFQYy4j0+NpUNJuypUyNRQz7JYKbo94KqiiGC/eholx3Lwl4zONzJ7IUBp8hUMgloeAxlD9xBsVM6GEUdhtJ9DtIwtip0BUS5/p3tNgWE/bzUUoniiJlJ3rO1v+zYbzud57ZzerAYRuUwytoasFYUFNaF872ZDWM/OAzQDNt9x/ay4f58x/35xu9vG47Oa1UGafhDDf1y4FgqemWzcxnmtXfFuFN3VDpLEDBXCBHui2AEP05ODpQiFtl5saUs8eOmS48jaTvymYvFTqmtOT39Xdf35R+fbSe1Lix2NoeOfHy0iA+E0wHOenKwyjw2LUjcXrzRTIqLTzrtGiJYni6kZnqj4jg6dGvZOzV2RlcAMuLSEpwxd5Rm6SBYMAZVJSyK5Cwkl5CSqc54c5ZhFWplzWXk3f5RY5HZ9xPOR4PeC4c7RZI5pt7xHg6w1gprNgvDgZ/7xjczlFYmvOILOHD7UgvJGYUssiLOctqNjDszH/ve0ffdYbACWZUbCHC4xs+FqSMSbnm44zMDjseEN13Us1YXz3TR2ehz+ZRdEVT9tnAmjtWSBIa2E1ra7juWSkhtCDPKYsb6YbwtvWQeLxMeJeHGoamYniseZVI6yVsJxCf3avQAAQrNAoHI7DMKeIZfAiFwR+UyYNGsDRWORMEgSmCNCiu1eF3VUF3RnJlmBC0FgUtG0RwnBxXQZ/hRKkJoHqeGgGoEO0WnQxtslE8xXzN0l7EaXcggbQfqdmC5LGhLwxo1NS5yskrNnKnHc2remG9Gzbl9O7Ddd9zvexKepHFU874P1L2j7QN168n6jT6zWCoB1epQHNuBcRyePdFJvXz8guN2x/35jvFCuE+Pu8PKwNINdVFAFlRUoNFJGNisq9bRu6bs0f3lzufzHc8fP2Pbdrw833F01knh+3K777h47fdaGWg1A9UE1CWeVCHHQFW254QKfRFzB+WBj++b3Ce5p07OKnffaR8Jm/NzcCv8+xOmwdsVDkvOKZiv5IcK2Jd8fKad1PL6FZanV1jWFWVpWC8XBBU7p5be9xw7rr175G7k7NcGlQYFIQJxXFwWj2x9smzxwjDnTxHvVY+qxs56DQ0QUNpIAbyIeAlxOQ/HoYTi0XT0pAROSYcyPBOg4W6leIGS7yqC7PKvpeStTvUCH22gvWAIsqEvcPVi6tJMbNQdrcHM0FbNXpBobIYIGtZ50SMSP42+4EhzCrmKG84RQn7bDisNKoSmDFFrc4fiahJlcbKET1adntc/d8yaQPbZCIVX69KwXK8eFCiKGopvahanSXYwHShjuGMpKEbqdFkYrGBpzj4yCqhGsbkPGtPeef0Hcf6AFcOB5JTdU/1iuKZaH5NwU30IY0C5WX8D2PQLwmQBO7kJyK3N9eLkBnMnFUwBz3xDL3K4SngpAj2KiyYXWKnJSh2jY2ifcCss2fMCOF3ccgQ6MNldZZojwBu9WZ8y6HC4D+UUUE/YVnlbKctlhm7sF8NeIMeBdmc/0HVl4Li6MGotgmVhPXO5rFivrC+R1cvmbBr8DS9v74QVu+IYhnXvKEuj9tzBUT9tISv13dptNKiOPrDfdg5RfL5he+b02ftHbzHuO/rzzacmHLDuM6kKoFpQu8HQUFeydofSRsgxuDZNUoF/e7kze7pvuL+9Y/daV1enax90HuaNut33UBWSIYqSCGEpwKwZEFkgJ5Csj8dQQ0TtWqKFIOxMoBqPQV5m0cIGai2ufk+iNTim0hKWV38vdednUlivla8DJ9VWjjdu1wtqa1iuFwTLJzStIAWjHRjRYBkU8lKpGebyRmO4SnQfKbMvQE6XbZcF6gZaS4fWQokgA8pBPFyV6g0Z8SGkmQKnB0dkG29cyD48BPm+AuJnZwN4TsnryTiee1gMs4YwhJufKueeCamhmrngo8C0kta+MDPiCHQeAJlOZUZV5UTKCMagO7rM6KI/5GhA7TSGUqBsuT4tbt8Ytaazi5EgefZZGnDDH6PkvcYY0TpraQ1YFmjrxMfVcnPFeUdd0iCQMg1ywJYxZDCi9IBUi0eUCWlA0kh/2uOBFOC1PPUgJk6swJG0QNQCjkxDIXNkCqbDOH3I+88zEJeGySe6ekOSjYEyCmW+htdaQhvOvG7pbFMPlvO4x+ljRKJe+p4No+M00uZhJwTDPZw9XE/LjGyATtXEu20M2fZgnjFQGkhyJE8LpZha0rH0o1M6aduxb0c6KSkc5XJ/uc/jB1mO4xhI1Q7zfRQElcG60bEfuL+9UZrpvmN7eyP8eN9hricog2kZlSoUhoHSBgwdEM60KsPIpPTr2Xce43bbOO5i29F9ZpVGO0sQEcxHnAy3c4XOLhRsIjBGOCcgVURivzw8BBMKT6eU//Eg730gIlspckWf2itmKPX4s9PfnP/253t8pp3U9Zs+wKs3b7C+ekJbF9JI3eAMz3b2lzv6/UC/bdhf7vz5TsFEtYJjVwzpsI/vqPtAvXfUy5JZTGDl4hpsCDisDzKfihAvNgP3CanqDEFPtSnAC4qMJotHrapzIxeo1x6MAq8IuRkvbzqmHY4pIux4qNcNhhFG2UHdrmHAkcxGQ3MjqYVD+cwVl3Uo6sUL76srMy+NSuXRs+JzbqLBcsKNNIpjP5i13ht6adhRsYt/RXUFZEErDa00oDZIaU5mcBYLkEY2aMp2cAyA7ju6kzHU532V0rCsFwwU1lE6iQ
WkgLtxHN0TjApUTTqumAHrQoi2KqfFqmH3sQN97zCf31OGnjJhN86OYxmQsJ6OEwMvai8jqAd+egj4jhGteOYppijiwlWB7ePkDHw9sSDN5vXs3QNmK0KNrA6wwZEmuo1TcFExTLDtiu22Y9t3HIO9PoIBEW+Arg5Ny3RSYbSYjYvXYc/xhWd4ljE5QkORLVMFqIpaKTlVBhvDRef1PXtlZsyEZdfLgmVdcH119f66NddkPzr2+47b2xfcPn7G7eWO2ycvhBb7wLHtWJYG6x370wX7qyu2V1eyUs/qEogsdLCptg/cX+449gO3lzv2+4G+H+j3nY7pGKg2CKt5AMgKwYAMwHRDaYqyKcpyIOTDuLzVR2JQOYXqJ1y/Ykq9SK/zmgcMAuO4EQMsiEwO6T84K79HsepSiDpjrfdCn4dHoPuxhSIWPqN2fIsJWzNLohQZD0AYqBYGqsymSeI49Et+9MPjM+2kLq+fcHnzCsurK9q6YH11zfpEOCmUgtL2xPqlHDiyUEt2jUGwlw21K8o+ULcDqdIAzGjcMPF/tdSpysY/r3GVuLMaskjFbUik46xLZM+I0oC5ApknLa6enb1AsaBmNpWPgFACUlF9GMDXHffnsXBlVCEEyVgPPp6A7KC+d7S1+ygAfn5AgM0bHePneQhq7PVwI390ykJtXXHvin0YthHZqaA5MjUGC9lh2ONZHMsOaG9sO8a2Ydw37M83HHfWBfp2sFYYONR5wwRC7hAkxBUp3Myaw7i2Hwwk/HoNNWy3Hbof6JtHyb1DxiS4cFH4tVemy0GvH8lccyUMp6DP7HjCXeX0NaLasAz84in46RFG1HwtxRqFgCSIxjohfEggUQWDjg7oYMYkpGePQ+coD6UOP0kVVBIQ5/XFYdEg8rqVQsi4OOwc6zQdapnsWZIGuKZHRPnqTbh+j4tD0eqZZUw5WC8LlqVivaxYLisJOpfFNQu9EXUo+n1Hv+9cF/fdoXlvwne1kq6KXgsOVZShENcEbMuCsODm65makKTF324b+sGvx95pX46DyIQSTs5ZWcY0TZRw6OgKtZHj2EXcSZ2yRYtasdcEl8I+prIg69nZoOsBVjH2ZL5HfgAevj48HmpMp2fYJ/H2Ak+XJ7GJd9/iD/3z4l4H2zjUaqqZE9SCuJSmKmumXzPFif+bHpfPsWt8eUXtq+mkim9Mzls5VlJBVcluGj463oai7wPijJ/SjhS6jJpL8RBR6oTbIspPfD1qNOa1jSKoJdhNkabPCNyMDbQJ8E+9JqbksIT8HvuBgFx69mi6yLzzCNAsmWVdNUdx8P0nWWF41CsA+kGH3LcdpRX0vbH2FnOcpGQzZFD181g8e+gu09+HYj8U2zHwsg+8HIr9MOydxq7C0JSMpKMrmo9uX44BqQN1MAoXhTdhd/TbHf12x3G7YfvkLY7bhv3tjXTg7UjolcXdku0J3A8DUe0BvC6pIEauRnWCWqHHwP+vvbeNuTa76sJ/a+19Xefc9/My09JOpwVai0Gx8iIilAnRGGnaksaA9AMSomCIxDol4UViMAqo0SommmgqfjFUP4DKByRWJJZCS4ChaMUo1DRgqgXttLHYzjzPfc65rr3X+n9YL/s6z0ynU//Ymcfee3Lmvp9zzn3Ode2X9fpbv7WIX//hZB7haQGvpqBKd9ADwUxaFzBuHhroQqKGyQRc7z2fj/W814Bk+LXoJqTopuu9wZazY60jrAe4F1Ws/UzdOWxbO4iaNa6LOis1eHAXGEGytz6Bdi+nMAW1VVLsNX/qSjOEaSmjgHwYTl7iwEivO5C3cDRaFCd335PsxlTtkvcYOdf9bjKQwN5g1zW8exeAhhbsWK5OWA4nLHeOaIcT+uEE8UakUV+GwlhVQavVY8G7eUdz0zhDosY001tH6x2H02Ie1XHxdTWvL0A6MxuCmIicX9PRkQLoKsaO0RRcZAA00qUxY8ryulZzWNJr2bDEhAftAAnLlwYYJ7ysrZSgPN8AHLuwMWh15E+jI4PG/g6TZGMjpbi6Z/9GNIWZnQkFw2gMgmW27yOfX4WXZTyLcV8rqf3tS+xv30C9dOLJi12ivYrH2kHsioM9Ll3Ql46GFRJhEG3QpWfuJTeR07OYe06Z5A4NYYSnmvVEIgpP9gBwtJkLmRHmDU/I/ncWz4+cgDgZrow6jSxUwVhk+5OR/wilKSmIQmEp2ibcBwDiRa4RQmC2moX1eMoYNdeC2jqSdRpwC7B48a9/lt9/X1Ysx9XoXA5HXF2d8OTVEXfuHjPeXogw1QJMKzoK5jsni9NXqx2ZV6sjClh0X0xZLE88ifXuFZa7Vzh87AmshxMOT9zFGiG56NUjAvRmnpOH+ODMcXZEox03QZcGFPeomdGIcWqm3E/HBdoasDZUsTDODCSdka1hcM/ZvBs/mWJZG5bWrZttH5yIGW6BxQlDIBLgXsQoGB5qyQVN5A0QwmsgqciNKC5sjN67CdONnXd9ULRTgbQV6xVD1hV9NaOAVJxiqWOCOLJSrS0JXK03hXrTmPTm2UJQdWJMu8nYw+c5L9f2iu9QZ/eI9YWHzd1ZSmU4uYXd3UME1POubI0PCxtYohrFV+8daPZdbTFv+vikGS3L3QPk6ggsK2q0svD5LSqgE7mnJFiXlpGQ8G5jvVrvqZBOa7OWMP5vFU1FwGzMGJa7LGPdNizmKgDBia0VKFSc4d683uCspJA1ZQCI0ttxlGLQvFmDV2+5sprRDa/Lc0GR+6fHdaiBHIiMxaS44QCN8hm77hHni80Y7tP5zgziYYvueVsaJdSNAQ9yz8yvKSJIJb20Zx73tZKq8+x0+4b0KVPFSEDDJm/qKE1Qpm5IsiZOutpHzsnddRuK1ByFEvUSXGxbFoSEnUuELDStjaGQKC2NbYLy3vodk/mbRPviaKu1Jy0P3DFLUaVZtTSsrU0YaKu8ZGN1s4cdO8Pi6F78TJFH8ZDfeloAAtbjlBZx33lBHo/NGoWOfTUWhL72bH2x+OO02gG3qnRgWht4cY6yWrHsF0y7xaa9FqN7KYR+XNDXFevVAcvdA5Y7d7HcuUI7LVgPx2QgUE+YsFrYigJcQWSFsmm/a86zBpqydXQSrPAeU12wnhZoF1BrWddk5QWa1i3UFdTGMhQ5B0uEwBOoM4EPa9aXdOMRjbwlEF70yF6de9TAiLVggFk8b1h3kykuBoitmBLS0IgAGFO8cBCEwrkFNcN4BQCr1bQYPZIpLYXBmJmtuePkLS/qbkZ09+3Scu9FvI/Ck+Liss+tb1fWBZrkynGIEoXm3lqE3VXEmCTE8jnttBhYwsN8cjKUnSlgTQ8j5pYcCanUHXkmQI98SfTiciXVvabLYd3SZBPxGEamyW7enPF8En7h2JIMh2FRasE0G+NF2dUEK5WpIIrz/YR72NKMwWhe2ZitoSVWCHkXCI/MbOWBYPwMoE4UMOfu8pwSQkeFJbF15tPDH3sw8uKFYF6kM46E0h++10B2Ap8YeHTvuL+V1H5G2c9ObWKHMzYGuXAqsymQslpNlTRBmazWqbNZ2ioKBIO5OlzZvyNZl
sOqIUoBHXvwrPvCxuAYBLL+u7MzELPTAQVzOA+rKeCyDp1vXudh/IORML/X7z63UmJjwu9NPUeVvZhcMJEaBJZBBgpAR1+ca8sPYjsuFso7LViXxWLvHm6J722Lea1WjW+IqsUJMk9H42E7nE5YTgsYhKkUMJlnO0+TFWa6FdgudkDvRlhbCO1wQD+dcPjo/zYF9eQdnJ68a0r0uDgwQTOrL57ApgChMFm1v8Nst55QCjztaAo0gTEV9I7T6QTyzqTkYY9eop+RCR1DVUaLDTIuN1Wn4nL2fUda5kYhCwv1WDlXpgEVttbvQ3mEF741cLY/iSlD1HU3oV7sMF3uMN+6gTJZfkrWnYEF9rOHTU8gPqCcVujawWI5QqkuzAGQV/QIsZFkYNBWcSXzoi4qdjcNXTtfXgJe09dcoLfW03mP4GF3xRzwcztbG8G1EZABbAn+zdYIzd8YnkT3ZpfdlVRfrW8cLc3olGwKXbaakq9qnit1D2e6orezjwzRGkegecLoFlZLgen5Nfa8mbW1IdTCiQjlIcpzz0Weuc7u8V7M2N28QN1VzDcMxGEkt9HwsuQRFy9KbscF69HysacnD+inFae7B7SjzUPXNc96cyN6dRnQskCcMPWCiRlSLTQMjjz4Rrg9zaB7f/r9EoCinEaZAcNGeDFuJH59lriJ+1tJRSw+D0JUHhKNmhoJD0QjTrKNemEToE0LO34XDGJJgSkscesJvjDAsHHjd2AopfTC1Mp7TcFovifg0/FeYQZRFAkreC0eklNE7xeAvYpSHXQBjGOOvMmAWKtPklmKAQgNBUybbeN/6iwRbVktHFWjcE+tn81UoN751BB9Fu7L/j6LhVFJHCEGp1/x70aHNYJbgH48oTOhTRXr1WzEk7tqPWgqO2DCQROnE/ppwH0t1OYWYcxF3soI1/I8gXgyFCEZtlFh4RaB51QkWDjColcEF2BagAIos9EBseWQzsIxqoB/TrQwKBbZ83YUwSFou2TLei6eY4j6J1/l3Gu4V0FhWw5QsjFkiQL33eRrZb18pNl6kXeG7WsHFKh1sbBosF0Am33tkQQApMUvi1AmA9BMLmTrbka9sK6yCjL+wy4QWr0hqCRJ6toVTa1uryOs7eRfSfQaY3hAAzZEo71EwObXNtrkLOZBaR/oTf9UE8w5d8N7cPsj5YN9J21QcQwiAZGRAlTWjcJhb2ljnYqL/74V2ls5FQZONBRlz/HWnTHbzI5YrL5+ZsTWPJdBKdXmijpVrNMCdMXKnNEEFUFncoDCaJFxag1dFGuE7YnMGC/VwCyuXLcjU2ZbybZ9y8a4AM7lYcgboq2Cslcj//Ys20nd30rKjFAr3JTulei+87ZKKgghMwTxyR4yPJbM7cAtLWBYsUC6++cURZuQHpHzBDkcPdxj9Zg2Wfw5Orha/yACe+Ex1eKw41BSJugF4vVXrhyZMGiIbQ6e4ma7FxXTFGSQG7mOQEopdfRlMb5DBuKdXBl1LpC+piIMJRVhvuYNJtHFmR+iQWUYCg26WvCoH45oBLRSsO4mkALzfoJOpqRkg+qT0wI5nRxppw7PN2XUNWqBhnihYkCWMs+gMgFshEaipjbF4mRQYgcLyKjEdwVPAWoBQaM/DoUYGyCItBZ9fRjDGyIKJQWjHMJAWqeikkB8ju65JcMxIyycwjZypbxlvDfhFkLPwuAFEAcRberc2smQadNUQJ3BnVIJbaoAgFKg5H6V761oCDjtZ0z7nXtve4CMOVHX1XgbFRB0qACtd/SuOC4tu1avEvtxzGJxAyp+wrd9KpRUUhayM9BHT0byKGpOVgoPmWaE5WkeAJLXToEMy1qJhWRh/daYSwOTve/aBuUYFFfY9JDyqT1DAo81m1H3M6bLnbFsOIoxulsHeMruVdDmBaUePSJkQIy+NAM6efmLwghdu1j4+tQamghOLZgGCKoTVA2gspNqhh4sjJnlNpGKONNOY8/H3Ck0gbXiL6bhln9+vhYTP7uA332tpI5XJ8zlhLKKhTuWnodYusdvPU7drk5Yr05oB4Om9mVN6K15TzIUBzYLgJQm44t1iPZYQKWhFDxabfxp5Lx7USzarVkaKUwZaAhU27yR31pXi39bp9hwoMJ6d2GnCogzE/RQRQzSPpQnNkgtt+otFAFXoqM4NxmgfZNLMJOX1YEjFurpS0E/EbqMMJ90Qx2JOwMkHZUUFxND54JJC2qzuhgSwYyGSRWlL+DGQJuAxdszLA2CYkrNBU4I/kIwehgCCgoEBCG2ZLaqKU+YR1R31Yu89yA26quuXsCtxoytROhszOe12f5ZSgP1FdIAhbU74PBKPRluaL17ckRq1j/LSAoTDS88DBuwhUSamsJTUCorUitqJcAZxkdwJSi/RpiPR5jPFVO9mFEuTOhZCNx4r83YCEFNaFdHoAtardBaoL1g0MiOQYaqSRlDRJj2LlQ9tFh2O/A0GfpPANGGLoR1FbSTkfQeD46Oc/LYeFjbE0kvu/ierYj9OzwpTtPA1yMiH0He2ze8iJuTrCEcPYYQnpNKoOr8bME9fjcO2NkRgk8x7j/aeNRq5RjFjUwjht5QdaSX7MrYz7CFnc2bXHtH6Q3aGMWZJLQa/yIRTJkVY4bQqUK7eH6O3NgIwJMVAPdutXBwVvomgrU3nNqKtQuOrUExGnwyEbpURKBpW9TLW6Mo5jOiV9sZplBU8YqmMbFdiTDKQ85+RuSklsOCpS5WT9CKk0WaMNVQUscF/bimYuprKCdJaGqg6mJyYwHGkd5u8zHSugvLW/0J2E8lApG6UCSP1hmfXS893X8wg7qherrDuVdH8PQ+oOxbQUFZAGzhElICiYcmsD3gcIHu90R2PxF2Ogdv0JmPn5vOhUGgDgWKBlgyuTUsp9UhvshvJTWFMhXGbmKQFOjEBoUGMLGikqI43NkCRcObzVYPfl3brq0ayWSy5LJ1IgZUxUJxHiequ0hIzx7qK3Z4naoniocLF6vd8fbxgKKfiod7neE5DRiflVzrrZJCWvtmlYdnQLlhNirHPGYPV8Zlbz9qbLSwZpHrk5yFnrcI4FBY31zYC7CdQYEAjV5GNTrbjpYRufb+xQnEyZ2P8++s0ZSypLelHtlI8tbV8lJtMSBOa1awujbB0rspKQctkVNFBarQSo2GcrJ97NySMRUZ+dA8H7Ei2zBUKKp4TRQJ0Mh9pmMt4znengNfg6ifZHaKqcIopWYkJNhqUklhMMCwOvGvw9UVViqytg5dG8piLd9RDJ0MJusqrWzgmGKzEWssU/WQbk0W+FCWAwk6FGWE/syoDmj7UC1D4oWyGudvsxmHd5R/qPk9Eak6i1qNjx4yTLE9Oc847msldee3nwROajmHGsAJt7w8Ia2n1Szz04r1zsF+RoJ1DU9KU8AAyEVhimALgGIU9NtkXygDCl8+fNx4lz+vBPN0mK2oky2nJGtDXyvo1BD1FQFjXpfVPJnWU0BmdX+EE2CQUYpePgDQFSyEAkIRYGKAxUJilv8NVBJlHN06uXpZex2CC4UzVkdOVqfq5Lq9Y13MejsdF0suC8Bc
[... base64-encoded PNG image data elided (rendered matplotlib figure for the plt.imshow cell below) ...]",
+       "text/plain": [
+        ""
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "plt.imshow(img_list[1].squeeze().permute(1, 2, 0))" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [], + "source": [ + "img_list = []\n", + "skip_cnt = 3\n", + "for images, _ in train_loader:\n", + " img_list = images\n", + " skip_cnt -= 1\n", + " if skip_cnt == 0:\n", + " break" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 25, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAakAAAGhCAYAAADbf0s2AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOz9S6htfXreh/7+9zHmXGt/31elS0lHhWPSMAQ7CTiywAZhSIjBYDC4YXDHTsNJQzIk1XFkckHpCPfUsIl7ScMR5DQUAmmYQwSRMccHg40xxshgdY4vkSzpu+y15hxj/K+n8b7/MdcnyVLJx3JVUXsUk71rf+sy57i8l+d9nuc1Y4zBh+PD8eH4cHw4Phzfhof9Vr+BD8eH48Px4fhwfDj+VceHJPXh+HB8OD4cH45v2+NDkvpwfDg+HB+OD8e37fEhSX04Phwfjg/Hh+Pb9viQpD4cH44Px4fjw/Fte3xIUh+OD8eH48Px4fi2PT4kqQ/Hh+PD8eH4cHzbHh+S1Ifjw/Hh+HB8OL5tjw9J6sPx4fhwfDg+HN+2x4ck9eH4cHw4Phwfjm/b41uWpP7qX/2r/Dv/zr/Dsiz8yI/8CH/n7/ydb9Vb+XB8OD4cH44Px7fp8S1JUv/r//q/8o1vfIP//r//7/l7f+/v8R/8B/8Bf+yP/TH+5b/8l9+Kt/Ph+HB8OD4cH45v08N8Kwxmf+RHfoQf/uEf5q/8lb8CQO+dr3/96/yFv/AX+K//6//6t/3+3jv/4l/8C56fnzHG/G6/3Q/Hh+PD8eH4cPwbPsYYvLy88IM/+INY+6/ul/y/xfcEQM6Zv/t3/y4/8RM/cf6btZb/5D/5T/jbf/tv/6bfcxwHx3Gc//+f//N/zr/37/17v+vv9cPx4fhwfDg+HL+7xz/9p/+UH/qhH/pX/vd/60nqV3/1V2mt8f3f//1f+vfv//7v5xd+4Rd+0+/5qZ/6KX7yJ3/yN/z7//v/9Td5en5+gJYDeh/01gEwGIwzGAv2Tcc12qDVRi+Vmhu9dVrr9N71+wejd0brMAYGcN5irMFayxjQx2DfD+5b5rPPbvzfv/Qpn39x45d++XP2I3O775TaGWNgnGWJnkuKfN8nTzxfVv4f3/MJ3lusNXz6+Suv28G/+JXPeN12btvBALyzfLSufPy08m5deHddCc7hncV5i3OWZQ346InLAs7RMdy3g5orx1GwA8wYWGC0Ti2Vo1Rab5RSOXJhy5lf/vyF1+Pg05cbtXf6GFxiJIXI93z0xCcfXfn4+cr3f98nLGvk+Wl9nHTAOktInpgiPnpsCIw+6LWx3TbyVvj80y94ue386mevfP66kUvDhsDzdeGrX3nHD/7AJzy/W/jo3YXeO7U2fvVXv+Dl5c7/95/9Ktt9Y98OkvckH/jKuyvLmlhSogJ1wH3PtNbovfOUAil4rsljhlRut/vBdmTe33du28ZRCvt20Fun9o61Dmst6xK5pMi764V314UlBpY14LzDBY8LHussLjoG8rNrbfQ+GMgtaYDtKOx75lc+e+GL143XLdOBZYl87/d+xNe+9yM+eXfhKQZM6/Sjkm877Sgc20EtnSPLNSuty8/LhfuR2XKltc4AWofWO6/7xlYKn99eKK3RWsM7x7oEvv+rn/BDP/C9fO37v8rv/T1f4/q08PTugrUWY4A+QRUjbx5otck9czvYjkItlVoa1hlC9FwuiRA81ljaXthvO/cvXslbIW+HPEN9EKxhPoK3XHg9Cv/88xe2o7KXxuI9l+j4/q+84+ma+J7v+5inT96xPK/gLfue+fTTF/7Zv/gVfuXXvuAX/9kvcbvvvNw3rDE447ikBW8twVm8tTjn+HhZuUbPuxS5OCtxYHRyk3OaB2ANl+vKuiaWGOhjcNTKr33xyvttZzsOnHWkEPjkemGJieAdRxv0DrV3ggFnDcEaGIPRB7SGAYIZeo0699qovWMMOGOwBkqXa+itYwlyz4LEmC0XuY69E60hWsc1RZboCd4T3BskaQzGGIwBbUDunaMP6gATA2mJXK4LT88rIXpiclhvcd5jvdz3zjsYMHon3w5KLrx+duN227m9brzcdmpptNYJ1uKd5Xn1BL3HYoz4YM975/W28enLnff3nX/++Qvvj4Nfe71Te2PPB/+ff/j/5Pn5+TeN+/P4t56k/nWOn/iJn+Ab3/jG+f/fv3/P17/+dZ6ennh+ejqDxOhdAmPrzAdtPoDG6sXUC9D7YLRGK5JIepML3IcE1tE6rbQzSfngMc7iwiMo7Xsm3Q8GnvtW6d3y+lpwNmDw1Crf77wjBc9liXx0eebpsrAuK8YY+Vns9FHow4HxWNexxhC957KsXNLKJS1EF3HOYq3FGnk5Am447HD0ZhgDajHUamnNYvRBiM6CGwQXwBRKqbRusd5im8H7jK/gfcW0RhsDawPOBbyPRJ9Y4kLykeQiDo8ZksQZYIYGAI3OIThscLjFssREeap450nLxsDjfGLPFec8T08Ln7x75qPnZ56uC5cl0Vojm0pwO942nAk407EGMI5hPB2PtZHgE5cUGMBl7ZQiwTkYSfTBe5yRosXbyGVpXNfMbdvZc+Hz96/sR+F126ldAozJHWsHsQxSkc+EN/hh8RiSdxgsDE9tjdI6t63QWscYwxo9KQQuSyD4RMcT48plzzQgpcBXP37Hx+/e8e555Ro81E5zB7ZCboZjVDm9wwIeawbOGawDTKP1Su1grcM5g/eGNsBYTy6N3Cq5VqJ3pBBZ48oaVy5xZU3yul6esJpAem3IJZVrOgZUCgxDRQKxcY7gDSE4ljVyfVo1ScFxPzDDcrwWjIXW5XsNQ96jNThnaTjacCyh0HuhtUJwHuc8wUWiX1jiyrquXC4XXAqkVKgNnt
5v3LfKGi/UajjywFmLt46PLheid0SnYc0YrAsM46jDgfV4Z0nWsPZOT50GWOdYrgspRVKKHK2x5czrlrH7Tu2Vqgmn9wEYjPFYC5iBNQNrDc4aUvBYwA5JUoyBpVNap9bKURu5ylNvDXgLe630ATHAMHIto3NYC2t0pC4FtDfgrSX5QHRezpl3EtaGxrEOtTdK72y1c8uN3AemDtZhcMvCk/PEtHB9WrDWYJ0FYzAY7EyyYzBsx1hDcpURwCbw3dJip1eJUdZKbPHW4nA4LH54nLNU20iuEVzFu8bA0ruhNSh1UOvQy/Rbj2z+rSep7/me78E5xy//8i9/6d9/+Zd/ma997Wu/6feklEgp/cb/YDQ5gV6gcSYgGBhj6HQMYLqciMGAPi/q4wRZZ6TrkR8mP7Pp1wBYi3EW4x0G6R987/jaiMkTo7xC8LTWqNXhNQl5Z0nBk7xUHE7x194HrXdybeQqga5pJW4AY8A+4j5jDFof9NHPU9CqJGTjKm1IN3EcmVoauTSGNQyt8Mx4nLMO+rPk92GMJjRDtxbTGrP0NRqw5LmT5F1dkQQ+k5Qx9CadRKsN6xw+DJwLWG8JeGIKpFJZlsiSq/w+50gpEKMEEGvMWWjIeZRXa/LZW5cy0ZjOURtJK9OoBUnUc2t43Au1dXAGZ8A5LVoI9C4J5eY9uVT6GOQqf44h94/TzrX2TgW8b4TmaQa8d7jWybWyl8r71zutdZwxtMvCWOGSpLBYUqD2DlYSSUpBPneQz22MYRjOiru0xp4LOVeOLEVDH/LfWu+aGKW7Cshnd8adwdJZi+3yc2djZNB7u3f6iRzo/WPk2o425M8uFXk5CiVLN5hzpfWODRKE5PuMFoByH/UxyK1zlMqWK0bvEQuE+Tm18Dvf0/n89vPztVlIjiFIiDV4b/HePV5Oq38rQTJ6J1128FJsDKh9UNrgsJ3SJaEs1uCtPlUapJNzROcIztIZFGuxBgad1iq9y2fItRBqwFlHx+p1keepY86i0AFjaMBv0m3l2tlL5SiNQZf70cKWpbsu2mF5K4nHYuijM7QY7GPIM26aJBNj6BpjuqIfvctzkVvj9ci8HoWjdYy3lNHxS+CprMTe5X4zcs1GU0QE5Nr3Ts2VmiutNUbvWqfJ9R72vHK02hi2I2dBb+Lh6Od9NDR2CWJVW6XUSq31N433v/74t56kYoz8wT/4B/m5n/s5/uSf/JOAnOCf+7mf48d//Md/Zz9Mz9NMTrPVhSEVIYOh2RqFe0AuAlpZG4vCeJrEhsTmbpue5EfSA2nprXdYo1CiswL9HQXvHfkobPfEPWwCdcAJPfjg8dFjrCVX+bmtNY5cyEUCwAB98CVgG+b7kgBt6WAdZkjy6ANMH9Qs1VNtnX07KLVTamN4x3CW5OfPkwde7nGDM1Za9RhgDHIplCbfm7wjaUCwxtBb59gLo3V6rThjcOh5RG747DLGGWquxDXCuAg85h3r04pxjg7EFMi5Yp1lXRNPzxdS8jhnaLVJcN4y2/1g2w6OXCWRd2hdkpazGadBKoWADxJ8GoaK4cgNgFw7a/BSZSusYa2hjQ4MnLMMYzha554Paq04a9jyzpZ39pxJIRBTwnuBWdY1CgRs4J4z9/3g85cbrQ9icHz142e+8vETaZFE9EkKXK+J0joDiw+O69PC9bKQoscOaMjn2vbC633nX37+Qs6VXBrOOoy1YCDXSumNXOVaAQSk+LGaWK2z0OQZqK1TauXIlX3LbPed7X5I8lyiJBljKEemlUY5qhQGVa73cRRut409F2obhBS4XBIflcpAEq4zsN8P7vfMy23ndjv4/GWTLgqoMRC95RK8FA2tE42hOcNhpajovbPXiiuFey6kXIil4bsk4RQD18vCu6cL754uGAy1drwxeGd5Wheik0R1z1L0HVUSfBtD0ATgGhzOGIKR4tRa6a680YDoHCMG3l0W7kfiyAev20bthS0fBB/w3mOc/BxjBsEJzLhEKU69MRRTqLVyL5WtNl4OgTlzbdIVm3EmqTakWO0dejPs3ktC743aBZ43veOt5RICy5KIITCcF3iyVVrTJFUKe6l8se3cjsxRK857rteFozfiGsAalksUwsKQZw6FKEfrjNooe6HVxrFlWqmM1jBj4MagMaitUVtjK5kxBtY5rkviEiNPS9KgK7Cmtx6v3bTEs07v7ZsK898SuO8b3/gGf/bP/ln+o//oP+IP/aE/xE//9E9zu934z/6z/+x39HO0iD9b3dlVYQyDCf2Ns32dldnoUiUbY85ZkzPu8XOBgdGqdtBqx4yK7YNuDYFZkUsVFlPgel0YffDJR1eW4InWKnzStZq1GmQsDaitn9XFPObMS+Abi9P5yHyvVv+7UdjEOYt19stQpgzMNLl2aa+NJmYrlZI1nNUnbgCONQQMyOyjVrJWptFrR9g7uVRu20EulhrlMwadAZzl+ujQDHnPAIJ9G4P30nmE5FkvC2MYUqoYa0kpsCxBzinm0a2VSq9SLMjTi3a55lF5106rAu8ZYzDeSEWpVfQsXILruG55y2U1TIjDnokLoA/5mYOGMZ3gnd5bllo7xXfa6DKfNIP9yNyPg9v9kMIjeS08msynnCF5j/eO1mVG6bxjWSLRa2fdmsw6m3RJuTS2I0tyzo0YAt5JweCMIThH9B5De3Qjvc+bYJY2cg8AxRhyKew5cxyFfGRK9OS9SGVspGuqRYJSPgqlyDxx3zPvbxtHabQ+SEui1IZzjhQjjIG3Rr+nngVS1utmhtxmpUqh04cgBl3h+dE7FQl891wwwbHlwqVUllKJtUkQNIYYPOsSeVrleau5Yo0UJ5cYpSC0Fms7phs6cs83kGSF3kbaRTCRk/6AC6w+G2uMPC8LpVaNGRbv3OPZ83LfdwbRWryT62IN2CEFQhuDXBtHbRx1FqJDoTKDs9Jh2iHdr9H3eVQJ4NJ1ZEot0JsUYWmhALEPmqm0PthKOWfqRykcpfJ+37kfmdIaznuwsG6J+36wHokjV4zRGXuRWGWbzuJro+Yiz1dp5zM2O/BSq/yeWrkdB30MnHPnM7V4f8LI3jlicCwxkEogeEdp9rdk9L09viVJ6k//6T/Nr/zKr/Df/Xf/Hb/0S7/Ef/gf/of8jb/xN34DmeK3O2abO/SGP/9R/xwKPfXWT3LEhJJmwPfJ4Zw74a6zW9XmtbYu85ss5ICoMEKIXucDlrQE3r27EINn5Mp2O7inSDkKvTVKk0GmdNSG1uGoTdLgHKLqwzWcfE1w+kBoF+YU97XOCpTmnf6705vByPRcZwpjKFxmwJqhkIF8nVccsXmpbLw1sCSikxY9V+lanJUK1RtDa439yJTa8M6yRs8lBpbgMSEw0ZPR5vmTa/OYoRlc9CR9r8E7WutgZN6XUlAISQaurVTqUWml0XW2h8Kxg6EVp8IHRb5e4CNzQoPlLASk0nW207EYLVgmTOmsQEfeOeHgjE7OO61Zeq8E57XAsRjrsK6x5YJR6HTPme04uCsDtffIfkigAJmZLCkotGywQTqdEPxZhLTS5H7VLjaXyut+cBzSS
V2AyDhhrksMtFrJ1lC1EGu9PYKt4SyCuiav+3HI+9wO9u0gOEdKUaoWIOdCyY377WC77xx75v0XL2z7weevd+n+gfWy8i5f5XNFDzrUz3s+E1UuTWCtLmhEaxVvDIcv5wNaq0BJUhQMgcv2TLOWy5ZZ98xyZFKJSjSwLDHwdFn5+PlKsA7bJQk5Y3le0pmwt9IwZjCMoSPJqs2XAYcUMoaBGZbRGqM5jQ2WYB1PaaFeG85Yhd4G3gVCkIIjRg9azAZ9jkJw2DEEbRiD2jtbqey5suVCG5KknLMEp0QLhKxhjBSlrQ/qkOSzl8yed458MHrFW0uujbUL1J0HlDa4HVm7E4Fac628bPuZGIN3kkxT4OW2k1Lk6Z4Fpu+PJOX6wLQmc+ncGO1RLLbaqE3IOnsu3I6De8683zY6AlPOJPUUI8FLbIresY7AdUkcrbLEqF3YozH4rY5vGXHix3/8x3/n8N6vO4Ymn5qrPIjD6BzHnImpKnRRi7DdWpXANgeGKcnMxMcgPxNNfG3QSiXnSi2VPUul7sLBskZCDKTFC4RmwDnPshg++co7rpfMfkkcu1Sm96NwlM6eK7U9Oj7B2T2XNWEM3PbAUSoWc3YxKQZS9KToWaKXqlsTl9WKzki0/FL1XJt0Pr13WrOszjKcFagSacGjs3Rr6M7ijCEFhzPSTR2lnu+z907eG7Uf+r4taww8r4mnFOE6CN7hveD5A2i50sYAY8E52oCLQm1pTTgvAYEhHaTzBrr8rpIL+Sh6/qoMaUEwfAMglXN0VoKDzrFGG2DHOWu01tGHYOm8we6NdmEy1DasMdB64+PriumV3Rnupkm36T3XGEgxEIKjD6MzI/mkfUhSYUB0knQuSyLFSAoeZ91ZgNize3fSTTmrN5wEtaHnwxqZK73tkme3sAapUJ1JXEKg1MrrdlAV789G5gJBuwFn5Jr0rgVXbZRaqFlmTWXP57nJuUkXnSv3+879tvErn37K633j05cXSpMu9uPnd9AbT0sgP68szjKSdOLeOoJ356wtd5mP7KWdsynDwBjkvfTGLVfqZKQZQzVw3Q6etsxlKaxLlkISSN4zFmGcXmPgEgNWLjvJWXofUuXXJt3hm87IBYfxhtw7dQzsGALzWSkkbGsYrfCNgWAMTyHiVwjWSVJzlnVZiDEQQgDMOa8SeovMeHrT7qNq4YAwHPFWO3thxjlruHgBza3+jt6RrlULLKk5DMbMLtHQujAQX7J81vtRTjQpl0KpQv6ordGH/P5SqxQQuXIclX3PoEVdzQ16JyBwnhv9JJDlXKi1UUrj0J+5Zemi5DxbeT6F0XM+F30YHFbnfJ6nNZF747Ikam/UPguW3/r4jmD3/auOSROfCQlk8D8s2l0JVFdzJR+Ffcs6iO9YJ5CZVPsOX6VzkipdILOulO05IwGwRcgBITdak+rfO7lIxhiWJeKtERxWB/LNWrqrlAGdpsNgo4wsS4xeKKbeCQW+D+lgnHvzkottrTJx7MlrOAfRk50oA8pGae1kPebqsDiCkVZRYD+jBA2D0a6q934m3tpkgF26BLfjpM9KZ+V0QHzVuYYdUrH2OZwHOAr+KBjvSH3gvFWiACcbE/19XWHbVtvZTdQq1aHhEaiN/hm800B8Tu7kGmmh4pzFDIPVoDjP0QnvIOcweMsSRCJQc8TRMSNhrSE46RijztVqB5qc4wkhy3uzxGDxXujri5IiZnKaCUrmgUo44HH95oAZHu89OEf1XeYu7kEOmPMPZwylOr2+jWKatAoMgiYoZyzNDKyxTGbGUJShTwarMQyMBtVOV4lGKZVt27ltd15vr5QukGcMgTUFmWHlQkuBobRpOWeO4AX+KtUoIUVgPnqXJMWDBJJroygpwNVGKHLti75a7TL11HMXnOOyRJ2JGiVnSDCrTZCJGLzSsPW+cXJtjJUkZfqA3hjOEoE0BrYPnBYvRglTwqbzJ/xvrCHGSAhCkhpDZuJmDO3KACVEPQg44K0UhfL4Cdw34cDgHM5YvPPSRSHEIJidvmU4h9XizDuZEQ+FErMSJSaWXVun9val96DTeAUkHuSZMWSuV4tAmtZMuFgKJyE6KLmrVLYilPijVuqkzuv7CfYxFx3zPjs/g94XToqYiQJ9M8d3dJLqpdKswEK9dcXWjVTuiqMe+8F2z9xfd15e7+QiFYFzwhRaFhmAO+9Ea6KB3hjwxk4kRB7GLjOq+/3AWIsLUg3H4Fg1KC3Jk5xjSUnnOw1eNuw90+2OPYQVt3hHdJYUhGrqneV+3xTPluF7Co5r8qzBsygzUII5J6GjjBnkDbk1cq3cjsyWC7ejaGCXIXGLHpeGBnqLM9A1WHrrT9x/Vmu3PQvkoFDFnovMGQzsuZxB+romhkGDuOLwrUE12NIoxrDWjl8iaY246AhKE55atDEDWW9sR+F2P1RXdJAVO4/e6GBcEvd1jSzBywzQK/vSgfWOgHQsA+mcwmRc9cdDOwPAGiPBCDPwyVtKvnDk6xlw12XBe083jlwbW2ncj6rMMbBOupt1jaToeX534d3zyvPTymWJxOBPpuZMTDOgnHC1MricgSUEelr4ytOVIxa2WLjGwBICH10Elg3eUWKgtsbqLKVJsLrnyl7rCXnWNnBNusJLSiwhkpzHYjAdeu1SvSMw62j9ZJPC4Cg79/3G56+fk1tjYPAWkoP768p+u8r9HjxmqGxijRgG25ZgSAW/Z4Gl8oRuNex3JTXUJt3NnjPOWbY9s+2Zfc+UIgWiEHgkcTyviUsMPC/phIJ7ESbocXjRPpXKesjc0zqL10Lo/eudXKRbfxcDa5AiEGdx3WGaMN/oHTsGzlieohBXvCYn553EmyEQ7WSy5VJ0pihIhLWGpxTo0dOWyFFl5vv5fj8716dlIYXAmhb8gDAh/y73OiNihjBYvbXE4Mmtk/uAkR/FjZX44Ly8/9ClKwIIwZNCEgKQshgdEu9QosRog2EfQ9teu8L8he3I3I7My77J2MQYlhC4LpElBkEd9NmMTs5VHwj1XjvXk0kMUh2a82n4LY/v6CT10HRMBp9g0HOoXmsT2GgTjP32ugtbqj6SVNMBsPOWpmy5rjdX9BIMvH6tG5O2LV9Tjko1hpaFVEEbJC8wnI+W4QymOpbSZJDfpCKeScpZoUULbq7Vuc4PRKCoFaDToeqbzql16fwYDxJIqY1ctTJVSrtUNYajNoI1VB28C+Qi1fiYXQYQxgxuXX9nQyqwcVJcMQJx1C43YBv9ZFBN3D9PCKs22ArdWJ6OgnGWuIRzqGqsTLBH7zpHUbpua4qtd2WwKVnEGtbgCd6dCSB6T9AgYoJ0V8Nawlk1NmxX8kVtZ4UnHanFu4EZDkYQLN5aFp31yewhYp2lDaOicIEXSxtaYDh8cLx7WllS4N27K9frwuWSBPbSe8Io1fukbetzOvS6zq7WO9HVPa9JEoDzLEGq0FW7M0EBLGOMs2OTIka67SVk1hjJteN1NnhdEpcUWWOQ+SYIYqDXvLyZ3xplZTllgg4qtRX6gJx3jmMnHwfHfpBDIOvMbSia4Z1jiZ5SPbF4
QqmKUliFX8Ebua+8NRgjcxDGoLUqZJR957YFtn0V1uQQZKT3rvePVTmHXNDmGlY7w+sYBO9F96fXuQ+ZL5cx2IrOFcegDc+72vChE8fAaccxYfPSmv6uoV1NZ3QD2LML7tqZ5CIIhgjmJR7F8Aizvlh2ZzHHRuuNvRzn7MYYSYhYYQyHZunWYhlYBlERGues6ORqxztHGEJ/lxm2FAq1N0LwCvVJZ3ldF54vK9c1sSYh7bQ6YWuZW0sD94D5pywga/eUlS05CSQheNaUNE49IOYZD7qiDmXOyhQdEfOENyym3+L4zk5SYzwCzuMflfWlVZXSmG+vG6/vN6UyV4H4vKPmdrJ1BKpCGVmWnjx2iXgbdEgqFWcpAhnmXIRhhLCXTB+MS8R4K1TzPnBBg6wmz1o8MLRCmrCYBnxVlg/F7qVFVuzaGdEwaOCtTRLDoJ8UeklSciOI7qphjQjocm06ZO9EKx2HEZxKNGBvAqVUtk7ouW0Kjue50YCgmqU6pEp6JCqdLajmq7TGcAeNwfN2COW8RnxwKv7VWZRCfkNhxlwbuxYUtQlc5bTzlOTkuC5RA5HDBWFPWu9Ey+YdWAkivTXphItAFPI5LQZ5KO2EUozBj0H3HqIKNL2o8o0x1N4fQXsMfOu4JsPyFANfeb6wromPPr6S1khaohBClKQDmqCcJCljjQQ7M+9gucbBW4ied5eVFsXhwVuLt4YlzmQg0Jqck8DwEiynbug1RWof1A5tSHH0bl14UoqwtyorOHVXnZzbCZPDA16Vt96pLVNb58gbx5HYt41j2zm8Z49BzqNq3YIXskhpTRwzvD/hxjk3jor21FYxuTAKVERHc993Xu+S5J/uF5YmxaEwVyWYWwsu2vO5b87hfVOyjqHWxlLbmaTuqkfKvbPVysuR6U0g7I8vldgCyxiMOVftndLElcUahajesECNHWdB3HWMcOQiM7hc9VoYYvRKULL44rA5Y1+gN3FdiJpcJiwoLD/HcANUPO6Mah0nNEuhDCH1DAzOe6IWbxOmP6qgHRiIXpLUx89Xni+rdm+eOqpeFunmbZ9dtMpvlGZeqiTeowpj1alGLYbAZYmnE45cC06XjFKbwo9Ctpikjqos1m/m+I5OUpPKCzrfOCtnZEC4Z7bbxv114/5y536TFvsojyRF+XVJCulsvLO4EVmCw+C5LGKJY53QkFvt3O5eWC+lsXgnc4A3Q3I0GF3WiPeWlLziwILJb3vmftv47LbxxftX/uXnL6c+ZaC04D5OCO0BFWlcm9q5Lsyopu+rtaGaCX2QjNpFaQc45NtkQOwcxis9XmcFcXRG99QeRcg6xknT3osTdpLS48dAdB/O4lqjG+lIG1CHBMlcG6bIXC9nIZN4A8YLa2pi4JPQIdfjgVuPoQ9pkIF8SkFghRjwQYWd06YoOFxw2OCxzikRo5IRexhUSyKCYCvwxhhY2+gGYXgBww7pzFTI24YMpyc9PBcZSDsEJpwzrcsSuS6JmAIhBXEqkRbpFH2OSaAwUiDM+eDsqCwSkC4x0J1l+H7Otbx1tDEovZNbp1RJukLyMCwakJ/KQpdWlTaETfa0rqwpEaPS/Y1CMqWJGPkmllKtd3oWbYy34uAQnZBqJIkUSs3knLlvO96KHZdzDquUeoMExksMjFUCXvGNUh/i2+TlnttLEfqyy2xa0G258ul7YRQO61lT5LomvHZ3lyR6NRcfDFcXxS3GBiGmtNaIc+ZmoA7pCIwRXVwDjtaxpXEvjaV1Llpo9TG4l8IXtxuv910Zop7ndVViTMBN+yJFbUptvN+zzoi6JA3jWKyiK97p9TE8rQu9N2rNpOAJ1io7swOFWmQOm4xIUbx2qBiZ+6Id8yUlVsS27bJEYgwYaxnIPSLBQq7FmiLvnla+8tETlxRwyGjhrYDaKDu0IxrOOZOqXZ+fKdUxcs2rsmjRz9W1eL1l0fBtuSojsPGyb7weO3veyfmglPxNxfnv7CSlVanFMrrR4KM6lzbhvipsMWXpVZ1VzUDfXIVuGV0e2DZg+pX06kQfMUQXFbywvbztNCfVU7OGCvrf7FnFGSNVMQYZ2BqvQb2fepijVHlQinix3Y8i6u4mzgexehHW6gt9+GcL/yVE95xUvplY8ug2u86Puv43+Zizop8ssqnFMjpr82BgbfHsMIfOxCZ01XV4a5zBVc+wMlyfb2Gcb22oxmL+aYWNZ5hfpRRyTseESbMHdLD8cBqYBcOpcVLNmPMilHVB/n9vRuyvrKVpRzNALJ2MJsgxGGZgumD5+mifVPWsJJTbkZUhJaLbOTQeYxI7HqSHt8XKPGdVz78ZAnN6OEXZkzyi+QyxcDKM4Ri8IbgI++OEfKV4MVjV4TkPHmFbziG16eOECEVnp4xQvT9ql2S3HUosag1TC6PVB2HFCVPRmval78ulsh+FYQ+FzZ3Cj5P1Km4ra/B47Qa8nSQQy6Qr7a0Te6fQUG4KR2mYXcTBRV1HkpvefHLfJiPietEMDpp9w+Ksb8Tw8OZ+0fNljDzzSEE1PRMm+SfXyj1nXvadWrtooHhAtUGfwhNGnBB1kwLCWIcx45z5gTJZrVgbLSGwxigaOiVxtUm6aB2HIbrwkMfwFtGQ3x2dA2uIUedDKWC9PLdtBgGDCPNjEEu2GAjOQa2YLvf9fDFntuMt3DfhQPlh8l4sreucsXWFzx8Iy66Ejnt5JKmjFoX7qrp4fJs6TvybPKxzOO9xbpyssEqlF6me912MXu+3nWPbqUeh14bp6INtVEQ5Tv+12rtUIsFTo2NUVVmbB+25G7EF8SnQg6NHh08BFx1pCWfA7L1jBxIwgmfReUQfg+MobCXTjAgYX3d5GHqTOzqGAljeb8ep+Uk6xwpW5hvubP85RYsWVPEuRAGGZN7WGrVZSu1U33FDID5rRWyKsyc0IBU7XJfAOgJLityPwv2Q95lrYy8CJRyl8unrK2H3rLmIwaQPSD9gHnouK3Tr0boQXrSTYmq8NOOK/1ngsjTyZRFtcG1YI355KYgg2Cs9W8gSmqA0SM7Xl6n5s6iRxKHEdMyQc9CrofdG7pVcM6U0uc7G8rIdbKXw2ftXgS9KFfjRypxHOhVHLpVYK701xvBnMdDHYC+N+57FtaGLFicuMh+KzgrrUgPntOaywJiVDoAGjlmA1Sad6hSB40US4WZXoxBj02R71E5uMkub9YEZQ4TDe+az9zf2LGSFYAbOSNHnjecaV45YMcbJ9TWO0gcv20FuBrOVs8C5rokUPO+08wlrFOJObbRapQAxk0otRds6oBpLD5NI0eR9bhk+fSF4zyXuQkwKjmHgyRouRvR3QeH4Vhs4K5qr0jCunvMiqzOdKe+I3uv1twymnkqcSEqvvB47n76+8itfvGc/Ksl7Smt0naUlUQzQuiS00sRVojSxYJqdRvSZ1pzoGIExDNe44DAkG04nkaNU0bHtB24YEcuvKy5E8WVUXmSG8+enIOLYp0vickkk7eAnCjBnoFEZl0sKeEUv9r1Q90w/CqNUaP30MB2oZKGN066tDzBWiqY2YK+dSqGMcRYdEy2
4ZUnWW27UKnrRI2f2nKm1UFuhte+CJDUJA3PqfwbsPjjUb+y+HeIYfZTTpVrmD4L/SiGvVaG27NYJDNFKUDFwO3Us3XZRYpcmGpPxsBnyb6pV9Oe+pTpbnZkI9bSrE3EgLjK/iDGKeFVvlNoat/1QfLkKYcA5LlH+9G+GxpNAYhDDxz4Emy5KQ92LQGrzcw+E8UNXcWuVQe9RsroC6HDUmDPJzAHvUSoDwZhzqRxVquijFtZlJaVGigmrlNOoVb0FaJ2WK9UAw2GjOzuo2QXOWUgKnpqCiI6NCICTV8NYDcynN51RWr4mLKsPPt2cUIR0XNK6OL1XYGpZO6PA0Sr3vPN636XKNobbXjhK4/19E7y/tpMK7KwlxsCuAtZcBG7t7eElWXvnth188XLj5baz7UIgWZbIR08CH73zHtv6ed9MMpCZN8+AYb7sjOAmXVBhUWMNtYkH3FEbu76O2oQgolTl0h8dg1G4p7bKng+2XYx3owVvINExWFJYWGJFDJAjw3hyHeINV2FYe3Z6uTbWFAjOqmO9EEGG025WZyzz8wXnCLbLC+lyp8ktfVBLgw7bABGsermvQYgyyRNSkBhg7dmto7OWUeXnWOTeX7wnR09JUR3a5VrO+NGUGt6RmWtRn0Rrp5eeXJve+5mkpi9d600gsCplUGuNZAcjOEwPiqYMgbCtJwYxDe4Dei1Y7INqbp3S0yW+9PmelNhUu1gRTZp5a41SDP1NrPE6FzPWSteuJI/ROi0Xugp1T3PuLhO52c3OLqprfLV6joYxVEWERhYWcaj27PQOfX9F7/9JQul9nKxaP+Pkb3N8RyepyY6Sv0ymmBhlHrnoA3dw00Q1lLU0B5VOH5g+xqnLkXmVMPCmRqqrIK+3TkNmXfXI1E0U2yEG1dU8xLV9dNVxzSTlTigIYwhDPNBiCrIiYK6cMDKzAWG53fZMLrJu4dDVExaw0Zz6oEmDHUPp1lrRVG+pWQbDrXZGl64wapdxma1+a5Qhg8ztyAqFDqIXI1HnnZADqlCtt6OIP9hxcJTMXjLGGvYcqcrm8U6+91G1WhxDfQarDOPH0HUlwIQI1UHAO0vyjp4CXeGqMCFXfVhOvdEkYJwCWElUA5nJzQRlrLDxjBkyq3vcOvTR6Naw1yIV9O29zgINW5bk83rPJ2VcNB+d4LwMlNUINqWmwUrZS03urft957PPX/n0ixtfvGxKWU/kr7zjo6cL6flKGAN34qrzHtduufe32KkQLPT+NdOP0JhzXrUVMb0V0WUXhwudlVQN8ENZbNPQ9lDnjNu2UawlWoMPBoNjCQtr7GAKKq3mqJ2xZ5xtApNrwyewV+SShMW5WBGj04cQZAYYpkgVgpW5XnSOzMD0CZFqEjifPyUqoNU9YGeSWmaSMie0PFmoE2WZIuk1OloNQr9vXQsOqwzABzwngbpTehO2XNfCx0ykWE1/NfieZCKVA/TeqNZIkuoeMwbeeaYuzVpHihZnlS3c2mmLJufEncWonTPNLiJoSQADbyxuPETlfQxsl+vtmwPvMM6BczB1pRrTqq5ekUJcCSCDU5A/z0UbAsYbFe2iCEMbQwhkVc6Hm1g1UEHhwDkjn+bZ+ixbR3PfXPr5jk5Sc6/JxIhlqKgdyHbw/rbxay937jeB/LzCdsQogczqQzBgL6KKP2rDVKkC0n5w3Q+OFATiAWid/eXOftvYvnhV2CZJ9dG67hmS99NVIDc0uBiDMMWcJUQxfOwdfvAHvofLkjB1cL/tbLedkgt9DF73Q9dyGHIVTYLXm0UedKmq7jmT9X0zlBnoHaZkeheoabOG7ZDEWlpnXSK+N1wzfLFtHLnwct/FLkdZc4uLSsOXfzNINXrfPbcNRmvcN/HuurtDiQUyDPbOkPzCGqSStn0wSqWgc6AwxHTXO5zT+NsHo3ZsGzgMiw6n/ZQC2Id2zRiFfDWRnq8gO58wMoeYGLsxhmKlmzYTRx/y4ExIsBsRU77u2+mB1pplDINHXT6CzBSCczylxOrFx9CMcYrLh5ShdIOwTHPh9bbz+eev/Oqn7wHZKUXrjFz5yDpwFjNnEkPoywaEXGGMzDO7zCqE0SUMRhvDiSD0rZ+aqcmsml3JhOOMna4Aev+oI/Xj1cBI0l6Mx2AINrKEAQT22mndcDsKW25KCtETaDgd3J/WhHPqwKFOJ12GHacBtLOGxQsZxBqD1QB8aNci3YN9zDf1s+NEbuCix6eAV8cL44QUMQCbZfacx6CWgjOS2J9TxCNwWi6NWRGU2rjnwnACZ1nvCTGSUsJiWUPgkgTKtM6+SUpdtZWqMTRSMO5ZZnrOdPqIInlBkmdpb2Qmem6Msawx8W6tLNYQjOHqJLkBgrL0Lh6MrXO0juuN3Cy1t5Mxir6PEBzXGFmig8tK8w6CP01km3ZRDE4/xdIG3QilvfRB1c8VvFNoXq5F1Zld7nKvwcMpxViFTjW5oZ1dDNJdL0HMC46yf1Nx/js6SZVcKLkKm8s+4JvZgpcmPnRHlQ5J/KXsYxWAXvxx/k8fAsXwqzJbTs+/3rHD0EqlHJl8P+SBb4NjTRhrCWuUHU1O/cD06R0GujWMJtxvq7DVsgjjZtTOy7srDoPtg9cue5FKk4dI2mkRZQr84PAqfs21iZFkH7Su9HnQiu1RpbUuFflRqmgp2qynDfuR2Y7M67aRfID4CJYTxrGG0/kientCCTLEb6L7ODLWipnp0oLs2jFD3NInlGA63Qt02lunK1HjMefVRZPzlxqjDgYKy8gXCfzwhjRh38ynrHsY80ryUueKJt2m1A1DB8WPtS3eebx2kKYaJUWo00EQCMZbzxLCCb2uwROdiKMtj5nS0IA8WSTTtqbrHGZ26dN+Zg7sZ1V+rgvhQaYw1mCU7YhTeyXVw0y3BRlrm3OOihkP70clNDT97I9h/RyM6zP0QBW1Sp7w0yArHVwoxP18ZmahmEsVSySlGg8UhgWZfQJjtJNNIG4Flu7R5ZWSvNqQRFCGGnQzk+wkf6jtyuyo0ZUlXlieDGj+y/eC5QE1eWdPCKq0jlFXdxecFjgyf0shYjukEHQurEbIY5wvlEj0lqI+5SRV97O9bZCHPp/n9xhD0vgUnSPp/CxgTv/DeWHOlRddRbjzIpk3zFFriM2LNGZ4SvDC5APZc6VU/i8RJc7PI5CnIAmcP2+SrMaQn1HNQ5M6pI0VPdnQexW9F404WzvjEaMkz2AQ/XcB3PfFr71immO5JnzwuCBVFFaskZoZpwhtr+IP5o2ypnSIHp3c5N45vLb5rXdQTdF89dYZ1kqlnQtlP9hvd+hQ/MHog/1+UGvD65xpakaMEezWasXhose7RAwO5xa+73s+5rokTOl8lt7zmXVqOCrOESAPaKkdbzpHaWy2qB1OYc+Vz+47dUgyXJflbLtbfyRsxqBaw71UgpeVAX0YbBu83jdet43PXl5Z00LrnY/KVYWIwiAzRnRb3TsuKXFdEnsufHHf6FnIFK8blFZ5d70QvaG3K6Z7MfGsomjvDFqRm7jmqp2f7tBRGM9ZWQjnsBgrSxQn3dzq/GDOmsT7T+
NQ5QTigdvfKAwvjRTSYqIQhAaPIjXG8E7J1P9dydaaUeyDLi3zd+X7ERWUS8PcxdlaLQePEWq1Dl3XASkYagtoaUMt5uO758ecP76462M+bg4iyUpw3NXKqPjlH2g6JOkuV5xR4jjoPv1+sRsaeE9yPhlhMO8Ykb+yCtDHon/MrXJnqrYHFZPAP/gpMipU9YVi6N2jqQeFDRV47wIkAnkdIsnO3Qsrcd8oXhxdaGm4O4BygFmEJxra70bBuPAZGGxSFEFvyuO3JnVEXrhH3Wwg4+yOdmBkylh9ATc9pqo2ESIgVPe5lGjJ6puWM6MoJ8GDd2K2oWp+EX+aMREEqkEs47tNKwXVa03MjgPDKi1tANiLnw8CxlJtYWOVhTKbCaBIOKDlsqoM2Ub9QH0bTRvOdqo+F1TJxah0fgiHFPqeCIGbdbpLNHKXi7H3i97Tj2BNU7rt6jbAvQOq2tJoNYXD2cQWvya9lvWkcWrVuc3PcNJhlUM9YELKiPaNDw58ziZdhKxWLPvbMzGsFqOK1hoEghjwVRRbpIdN7XzItqU0YwYl9O/0HR67UTRv3jHl/rInXPBQ0Zzva5Xxr4VeudB6YsHQF2DbU2pJQkcVeEktZIRMNYCg/Iqk7x49ATnF8aJRdoq+eiebobP9wk/aEonEVKUzOEwWgSPdSE3frgNvB7tD6nqTk9CHzlHLHmdXHoxcOCXmW90txRy0U5FtDjuQyMvwrRYsBXaRSmShPOQcVPpSClhFaN+ISVaT5pBHJx3oqBkBx6rTFY7nwpsltrslOo9C4UaG6Y554LOXXCQIpFySoFZ06Pwanp0FwUB2exeEZbDBzdOSruvZdI77EPlJ/TxPrpdj/w8nbH68sNiJXhhEuA15r2V+osmgPSG9RnoKMXak32SEeCXXZER84sULngKFwqKzBDqoNNUUWXiZ/L91XcARZvsXjmRY0DeFwaVRiNs3gKIy3mMg+G0XgZALqb2awM5/vWuBcYE75SgMkGdUz1D+eIVmzohtO4dQZd9WmX1To1T7EW2GpQGqdoS5xoelzOpgkPX2ok5uqJQEAcYaZN2djX4pyeldaksQtLTSn10BXxizILHujdOyyrp5XTEVCXBNuBnrkC0DojdZmIC+EpJftOo9kEmE4Sx7AF89aeLgrjOlYy4YBmrCmXec9Q6N2hS0OMGfseEbXC7Yh4ve14ed8RjwQFoK4FGsxRuwiMN+8Ho9Gcga4aStVJzBo+n9ZbWTsITPzw/k9B8sN9SZLYQ2NeK5MGFODMSO3mtdRLQ0l5vMH8+6V9jvYA0Ep2lAMGHp+7/Lwf5/H1LlKRxrKhMfl2C56ECMvl+9gztH5OU1UOXK2AXBR2zw7woh2ABqUaeq8YYXDsengxGaVwd8e0KPIrF982yRLVaGhxBmC2Ez+sVkdhGBcxJk10ECoerhb0xySDARU+7LS4SB6egULF7R26N0SjoFpDTTTppBFxm9T7UYSrdN6pUhM1DosRewJ0sfAZcOOp9Sj1dH/uRsEGx5iEzUNZA+sKTUJTRj7S6a0mWjJjFbUhztIFRBvU3rFsAbk0rIsnbFsaerfcsSnGxV+Cw9MSsIaH/CBFH0UohesaTkhMYBqKEkg0GYyzkuvcR8VUcN8PvLy848uvXvHy6QZdgBw8XFdYhcbvHOna3XBHMHKqIMvh4yDE99XbO24xTmujLILo4TcYxEvtKfipwym9z1iKj9uGSwhAJ+t0T2CwIcjgmnqe3mHE0Z/+acJy1WS3QiAq0jHGV4eRL80LHIMmyuINOO9kymLzoox+sPuYVyUaGmLJMjHu8NYiN+oB99oAq3FRC6NngplaGnbz7XNChhpI2pkADAki7DGzIEjIKK2X6AKjrYF25pzCB1GmCV4uO5ExNYyJfDBKjSEsungyoVrvUDGjtY7U6IxQWsfLPWJPbAAgjagTAfrztmCRnZETIgpRGsJwR0pYnEGvBUZ2bqqTZJVixrt0Aq+3HW87C1VOhU1vbfBaI1iDlBeebbnQvVx2Qsqqk21oeQ6Nz0x1ut0PraJzDqorJg3XhoaGpTJC53Jwah9nxNCRWtEprs7AaZ4jhO4lEHT25H02CGPPPWysMHd5XXgABun/Ld59/zsfY2wcveyIjHDOoI7JoBSOmYOjP9Xf43t87sLAg03+jiT3tkaGm80aMRWEXGAzLUVM5hK31gZdG2zrUAb4nEnBh4ISoPv0xQI+J0rI0PeHPAYUxph2KMHIBWyqzcJlK55oGrWQqjzG7unH9TgNtEHdbtNVHAKjOTmIGQcgxWVkEY3nK3DLI2VWBljG2gujkCQP+TOGRBEr9kTKcC/lMnU7IRTGeAjxAOBE4LSiR1iw8I67lkEZnhPpw0LbCATRZNJJ4o6fEvOAci7CetJzgT8ox8TkZbEvzcqcXpWG1eAhJFNhVzwUcyVJ4p4y9pRxZDLfGiDuHB1OAdYMF/MwY2IqaLGzBI/FWRilkQp3e6owFdY7mn3CKCJjnWSHsXsaJp+lNKBV/syhIzTMMxpfTilYhRnlAkDgOANtGxfysvwen+1opqZjST/TdTtkkmsNRy5QWuOeC4yr8K3hoklxt95NBwWAE1KpJOaoxvtvQNJNIKkKFkqThUkqImzCbeNSPDHlSZ6R3dcQwfeH+3zKRh5h9flKHkgFrQOdk+Lwr+xy7S/eobaGxVlYZWkFJBOpNYMl2NFlB4TexQQXU3RLiyD+1CwozSAeaIU5efWHrzaRiDYWZ+JAoeY0ed6jAPrjParpgiOj7LDEWrzDGrj/HHsssj71hPxG1Mhocmki2idsfgJIDz9b4Sx2hu+nk5DSYH9C3n3/Jx+8fBuU7rBW4bLR4n4NbgodX+6Cv9eCXDIjsftJk51O1K2itYLaMkqJk4WUq0MsBvcjUX80aMsaCHEBjIY9srgXsLsCBHqUx/yVFmz+oX49Zu88/t6EuTBgDXF2lj2Ilg7RWA1TjQQhEv643SJaqoiiV0JtaLLHYSAf8eEi2qI8KcCKjB5DU8pSClRvePcWtTjeOFrDOw9tLLQIUUeMiXFkV/VGUWhJBWk8Z3RoQ+cK6wzCEmb0yHBDvj5laKVw3BMhWK0QE6daZzQWb7F5i4uno70dLCx1+tcNxllXAl0W5v7AKJTecP30jqaA7cqkUifRFkPXNSyQJqT08DWye6xMrtUoxN6RM3d43D1FvOw7jsQdw/BqW52D64RDnbVYncO3rlesi2f0gsA3M49LKXy67dStgHKLLTh4a+EVC5rhkyKBZfH4sC14XehleY8FXoxoryGweJuziK/WYNEGZuw/O50vzl1q+8xEeYRYQike1nLoWk24yxmKZ3NtuMWM3ABlI6o2UMHhWSsox8BQCCSUpIjve5zQJITGrwCxhuJ7C63hgsd2WbCsAc8fAdcdn9O4dYRNOzClCS0XSlNqrqiJprUTvh+NSTudIc7GFVJAgbd6uvlXuTmva8B1DfCWn4UWLZuXydYbjVJl5dAIyzoK7ZDt2fygDpFrQa2FYvhOxxFJm8PMC6sNJRfxwwOnSPmMBrtRC6owjxSZhul
FyjVD0wpKFXQQqfhw3aB6x9v7E+73iHcbsWqNRbSLY+Jvcn/UXmFah21diBTqbFofGocR7Ei2Ja89BaA0Cz1zBf7ox9e6SDnLEL3FO2wLk0LXxeOyBqZnZtIqU87oIiqssqNS440FpmB3BPtxlUyYMNcCXQzuiTb+zhosq4NLRnZTw6RxGMiK4NDIKA7+euLk+JwQMCYPRRwCgIzKjzvFh85IaWobeutQTXRGhktbFQua0dhLw1vK+Oq2o0Yy6i7OIViNzVk5h+jLZjXZa8SsOYkSytGT0bhf1ukzBq2xBI/LtmAN1IwMc1posptgIIGQ7PjpxI3p9mxln6E1CSumk0iybMwMe3qmI7rRGjFmqu0VjVmDM38g+6ZV0sRr7+JtV+YSPBdCbSoWVKXwdjvggkNMGUv1gDPztTlrxZPPwVbMg8ZJYZqv0/BgL0qh24peNX/dz+6bosgB/2isPkwlvzcWwTk8rfIeDgcOuclTbdhzxleShjv2SlAKuTWYbs7uv9M2KjiLpyXgi8siu4Mo+1ZS2Z3AWka6+M1ZBKulmycxBxA42vMC1EbDBX6O1rm5IO+d905wDs/rAtSRtdUnW60rnEzRwtTXpkkZbzIZvkmg4qfbfe7InEzyVpvJpNxTgVIazjs8f7jgcllgDLDWRpulQayQAtWlw+y9n7lruaJKUF/Ndd6zOZ+hlKnKDmoimyLqbcxlGhEyIzGhyj38tNAOy8lUEpzDdQ24xSSFSVhxSmGxVujjelqTdTSUSu/HbCuy47VqtcJlJocbIWSMHbVMhAPy0xJTIuQauTTk/ODnCttno6sAusEI+nTZFigF/OwecVsiruGAaZLf5h2sUN7zZBkrOPn2VjEjaqAaAM7mRqKQON21kxXYOlr9KYD7BrQXAr36LiK8vW4LXEw4jgxnbwJxneOyUnrCN6MYFdndDChQVuHTySDlAqWAmI0sQQepgu7iZLVQd6KMnr5zGBeQFKRxcXGig2Rbnfqd8yHT1MPYNZhvCpLNozq6BnRV0LYDmru4o1TcY8HLHlFjHpRHdG/hZQIByLpqWsMZdlpaE97rINXZO4PexfYFEHaZRggO2xIQJvTyWERlSe3NXNTOSco+aHxkCd4rmZjW0g26t47tEuTwpFdeq20q6J3h4pbrBsF01WBT9WmKmwsnxCySBJUroBX2g2LtLG4iAPc7p+Evu2CjSWxw+tx7PWpDYEhIqUajPMQT9A7RC/EA0krDGjIOx/s0MtBW77B6FiltSO3OveGeKP59vR9CAFHY1gAvrLPBPJOfiGF6uwYejtxX0VLJG4NV7I+8MPiMBoLla+PTfpQ28FqwfTynzutCrKpGE6aVBDgugVTmTFZoLA0Nah7CtdFtoUFYekZzGugdtyPh7bbjez/8xCTbWrFaB6s1gnXT+uke8yxStZHq/HQNMFqjbhVGJve5Exn3mux9WmkyLUsIqdjyDJZmfmCz1oddjIaIssVrjtNWnUVKy720p0w7st7hxWSXtHFHJqIxcNrI6zIwmkbLtcn37w1KVUbfGIMgNllW0wtxEdPlsRJsvUFLgZxkLS1Nzo+gNONfx1QMa9AtrayUpuuONQbrQpeKL56vNErWGshVgkrtqTntwtKDGM9Kc8afpec1NEyL6TeoJ8sPvaMbjaob8k8D3Pez3/yAddnwfNmwrQu+8c1nbFvA9bLgfo+47xE/fH3HLst7o4ZPFzt8DUYtJwXsGSi1oDbAey9MWEJ3pfIi0kVL9yRPQJ2j7ShA83SS/z6Xt3Ij73tCTgW32zGnqe0S4L3FRSYWpRVqEViy9UnfHeLBz7ZdTQ7n3HA7El7fd/z+l6/48stX/O53f4CSMlTv+Nblgg9rgMIF1+B5M3gH14HqTww+S8xFFnZPA/B0WbEsHh8KC7APDt/+1kd88XzBdQ0wGsTWc2XK8GASOQ2l3Ny9DRNePYp26yddFcz/6oFiRN0B3TuiJt11dH8afdLmRyR7aX0SPlIpeN+Z8FskMqFDoQLIHcgy4UAOTC3xEouzuAaPj9sCXwGTGxarsVk9u3ujBiuQWL4C4C11atVVLMEjV4k5B/0irbUIojUhi5SHlbVmFqhhFJskVfXTbcfrEfHpTpLOsvg57XUo1MbcnyY7nCauBItz+LDRpUIrSS8WCHq8xkHdd462S5BDP8eMdE+TFabElskGdsLGGuRcoFIRmrPF4gM+Pj1hcQHBeWpgUkWsjKMx1tJ921vJCtOoteEeE97ebvi/vvt9fPnpDf/3730PORe0WvG0rAjOYgsrhKBKAbIiMSCmjH0/cF0dem1YJAJkhgkOdGTsoHPhNZb5xamqoCRmZMVcETM1S8MtI1gLo4RWXitDEHMWNmVh0e1DBqJwiwlJIEMfAoxaJM9MYY8JgMIaPC7OYXN0q1iUnsGSeyrQtWIVOJ1pvoS4P14WXJYF23IKume68visDIXpgzjSxxHUce6FZFLXAHSlP6LOFko3aANcrcG6VBgA++JxcxbHLaLVBtOGJGTIBoDRH06YVib9MUE50d458bE83e0JX5re8ePNUV/zIvXh+YJt2XDZVqxLEFdeD796wfI7g/jEvLUUagoGbKO0kC8wSAg01hxx1QoavXOyGJ20MSMIUSxHxGlhTg34fL+EDtRKGOrYE15ebjj2iJeX23R3+PjxgmUJPAwMdyo5SVpoaROKGy4DjxNXb0yvPY6EXQrz/X7gdt9xu+/IKQGtY9Fc+D8lj9Va7qiMmayvLLu6I9MdYU+FS2zp0r218E6JJQp1WUvwjA2Qg2FEl3T9+WT12WPuCvpcAs+l8FjGKzXfC2tGY9BmhzzIHjGPgtpQxNA113JOSkMErfhzx25PCew4YQhQCOw0oxeas9Coc+qk58jDQ6Zj1SEmtUbgwvPLVgPbRAaDzwABAABJREFUOkYQn1GDJcX3nQay6qE75lSdxUopF2qp9Nj5yBegCJXkKu7UNLed+zR1QoNjqiuNLu2D9agfXvt0F5dCNZhwxnLHMaZMbQ20kCiU1vPzcc5ORMCVCmsrnMB52nCiWCTmfuiJYqKF0+v7HS/vN3x6excbLYpIF+fRKtDk/ivS9dcOxJQRo0WKmRErqUyz1m6oNwNwGjDXR4LEI0HqRDUGOQYgTmE1iQXeaMTxGY3rWSkmDauHwWVyT+TelAbwEjysIoEpuLFH1UJEIsuxTVSE14mzhOO97I/XENhMWjuJPuPzewxgnUy/8aTGExu/N+7D8efl+3TNQ0sLmuO9RUl2ejr2DkkbwPn6dZcm/nT2ICIx9HD096SEgOzmNhCn2sTmDf8DgtgffHyti9TPf/sb2NYNy0oX7ufnjX5Sq6duwmpcn1aknHG/H9Ag9VJ27Ki9IcWI3BpyH84UBmvwhLY6LYFaaycUtHgsa8B6oU19WBmzwXiAzw/kcTOkyMnp5dMNv/d7P8Tr6w3f+/6XwjwDfu5nv4Hn5wsUxB7HaBwHPcVyqpMqu62eB4MsIjtGnlTB/Rbx1Vfv+PTphpeXN7y+vuPl7Q3HEWnwKnDnxXts3pHRY8WwFEDumUQT6e
Lf9mNSar/5fMW2BApLt4CwOHz8eMXTdcGy+OlqUcVyR7XzDp5hjXIoVHToplAzjWdHJMZw7mZQYpuaHGvNA+zCCz2mglQqXo+Eo1QRW1YRqZ62LLWU6X4A0LvRiZ7LCsSmFfeFBmrGcNilAbrAKupTtOIE99iaMghPwTpCwr5WBO+QKh0cCHERDtGyS7CKhdBJ7MiMdRcILxe+tpgIQVmtsTiH58uKp43dNBRdVMburfXGmAmQUp4fUnXHXkzpjNo7nLOwlnT90Xm33lFzQQJfmrYGxlMI6jyTobX63IyXjZmFtgbeexjNJqbIBD4mKWiFdQ3YtoVZQ0Yjl4r7fuDT6zt+8NULfvjVC37w6RM/KyEFLN4jpQplHJSmgEBrLQ7sCc4Y7HvC4iPynpCDMEkl8KgDs2ifZJh+Hopjin9k9mIcwGQ91kaCUZJry+gKCMTV9Ngtcg81P0cNQBEy18HTBccVEpw0Xf55PQnsJns8+h4qKG3Fh9PRUd8yUHDo+7S4uxtnoMVK7dzHqR8pWJgNiwIJVR0NzYj5LhSMf1h8tw5TKnxwSJIeMBm2c2rkOaR7B4yI0bUEkMpOTAmRSgk6UBo/05oLasxTCiDI34/1+FoXqW1b8HTZsKwezjuE1YtZq6UnGjo+fNhmx373B2opUA8C1T1z0ii1QUHBqA6lJPpDaRjFzvK6Bly2gI8fLnh+XnF9WrFeF/jFI2zhdDefI7fonWrD/Xbg01fv+N7vf4Xf+Z3fx6dPb/ju974UTBnIKePD8wUGLFJaa7zfDi51Y5HuXOP5aUNwDusI7Osd95hoa/K64+XlHe/vO/b7gRgTSinIOaHWhrd9hzMan24Bl8VDGx4wUNwX7InU6bcjTtW7UkCwFqvn6B68m/EBXhwmrKdGTAt0dlKBuQessrQeExO7TT2NWwfUNw6OR1rwMB3tcihUOXxjLjgKgw1jaYhNPNFqQ875M6ouWXMKwWlsweK6BVxW7tNIU8eMtajjQGvngdZkuhtTHoCzM1UNqp5YfSwFR8mIJSG3goYGKBJRcqPJKFT9nMYvO6dB5zZKUVMD4OItLuuCbz5d8CREFYCHxx4ZMV9qo+YJ1EPd9og491mDhc3Jc7htBIVZoAd5pVVGlStdoGJGzgUuFBhv4WubrixDRDpi4uFpcgxPWLcNkgEg4ZYOIRCuLLXi/bbj0+sNn15vuO8HjpTE5+0BJu94EJ4OzhH/+2g+UkyI0SHuET5Y6uD4F/nZdO7rWC37OUWP0XX2k+dJyYkac8ItQirwzsl1YKc/n9YKQSyrPm4LgjMzi0kD0F0OcWPkWj7F51pzcpqRJQM26zilDkbPCV0bHvparNMoB5Cdn5CGtKITjFZnoTqNcjGvW601uqQVGGd5bQPopQGqiUs5CSUxFqRcgd5lJ3bGgPQuDahWU+PWIMJ4iEdgF4lGJsNY1SafKc/IUh+K5B/x+FoXKe8sgneMq/aEQ1gsFBwMene4XBZi7qnAdkleLYX5KRj6IcGw5YNXoB7G6NMReV08BW9bwLYGLIsX92925mcXIzeWHLq1NMQj4fa+49NXb/jBDz7hy6/e8P0ffCWLdb6OkguetlVShDVe33Z2y6ly6W0NWiIGX7cw8eDX+479SPj0csP7jQXqiIkY/1CPi63RPTrcI0WJ3hVcxDGi9S6R0XQRP1LGkTJhqNZx5IylcDoY09H0VZu0V31aG80iLayvRO1HL02WqnqeDY+hjvOG+azD4okytBlDi5RyRRKTyizuAIPg8qjDUYqHjn9ggQ74aU4xdUxeDzo6mYTawwEyhyl5WrzZhgCaOVExZ8SSmfzaKrrS05jXtAZdNXzr0BTK8QCT7pJNkoK3lh25XG9P24JNWIC59Nnh75E2O6rTwFeDbv4pF+wpQWxmJUaBu9XWOw9IPRwI+B601tCyFEwtwZKtwx8J0AqutAl7kaXJYoX/H3lvE2rbmp6FPt/vGGPOudY+51RVfgqCSrCpdoQ0BBWMSiLiTxCCNtSAEQQbpiNCgmhHURtREQWRoMQ0NWBHMB3tBAmCBFEEg8TLvZWk6pyz91pzzjHG9/fexvN+35i7yqRO7r0hnFszrJxT++y911xjjvG97/u8z0/wECcDTQK0sPepzHN/1cCd7OttHV97ysi5jp+9O5CT5n/cSw3joYIICUvdpiztBWXPhIWDdvSdKflwB/X9cZ8wyGCVQZDi5lLdT3Qac5aJ38E1NIXhuim1dxbzFLBEEiS87otz5V6nk3ucNYBYfD0k1xEHNivHu3h8D8OBRRvfLtId1mHa5DUDGLHjh31PJ9Vv2PG8dUjSvmfw3PRzewxULRr3IoJhqcVvTehVjBnhmtCdMOU+0EJF4ootDLQMfW3iCKB3z85v9vpcF6maKzFnASAPC0N0YaTBRx89YY4B5zngdp5R9oy0britO17vFu/WjRk+2kFLExVsWswxKs3d4aM3J5yWCR8+n3BeImZNl33EiUl+400npQ3b/XefXPHxr7zFV/6vT/CVr3yCt+9e8cnbFwC8obx1WK87bCVryhiD2z3zUKkyYMjb2zvmKeD5PCvzqeKr715xW3d8+nJFzowxSDlj2/fhQFxbxX3blcYdcJpmwj+e+pwG0oHve9/l8GFpjeP+65rQjEUSgQSLZIBTuWAWMnVsCEf8SC/6Oj2VNSPdN+Q9oyaKPPvB5bxD8GE8iLZQ/wWjzCw1wCy54LplrPuO+7bj9c5F9bUTJ4RegkXtjQzIvGN8icPlPOFLHz7hgw+f8O1f/ABPH5wxK72+lortnnC77XjVr31NQGmI1mD23B8Z0zSx1pD6bzihpFJw3Xa8vd3wy+/e4u3thq98+qnehwaYL2gCeNvQUFEbWXcwggh23DAGzpBxB22AmgWWMwPrnp/PmGKANRa3Rk+3Wtug2rNQcSm9bTtypezC9R2nwlR9pxp0Gu5avh5IJ+op2ERQUlH9mkHaMiAGcSYU7SwjzJdlGvDNsXuUsafo7uW1Cb766Qternf8z//jl/DJpy/45O0rrrcdpQiWaRlyiC+cLzhNER8+PREWg8WqP+NWCkrOWI3get8QvMN6XzFFnSI8ozucMTSZHQXgmFT16UQROkpsCueVWoeXotP3H1UgXUSGb+R5CvRUjN2d3iIGT11eKbiuFd4AswVm4Ej8HXs8LTSqrfJo9Gw0BlaLgNFOrbu8GBXu2vbga1jpsC4C2EYGJvR7WXPU6NHwPaAAxhoYIfGn1oacMrb7jrQmXF/uWG878l5R66EX62SlrP+8lzI4Yotec+cMtpKRSsW7bVfBLx0zoiGU3gk81lpsuVv5/tqvz32R6nhnMQYlc6KpVeDVyytOQSniM1AqsrNw0hQaqrqUrmOZ7ZUqGhwv5qympKcpYomBlGTdwchDYeNpwwmBeSqc3tLGG2BbE/ZtR0qEUmo53MxTKth9wnrb0G3x99RdtjmKN1uxOlqimHbEQLx9e8Vt3fHu9UoxoLKacqZPYe/PBNSCJZ1E9lyQauXuDRghkT2qvcTAlFhj6KqdMsRQ/yTW4IN1x3KZsdR2d
G92tKkDIhvxIh1CG+QITlHE1QnR9KU89KHoJpw5Zdw3WgzdtoTrTsfqrVQ06fZX7aEzY+fdl/vTFLGcZlwuC3dqatjaacjcA9Hsc9357ygNVaGNuTY4x87QkuGAZujtuOvUct0o5L1uO+77rrsAixIrSnODNs7LYMaivXe92ttgMrSeMc5gWSImdaDwntlFXVDsbGeWaoFpdRxq0iiyDZ67oiXSA3DSou19F2eSYi61jSV57857sUobrb/SsnP6U+G1NYZszN6h91PxYRdivaMllDTc1g3vrjd8/PYF716ueL3ekAo9BqcQSN32Dk+nBacYcZknWOPGbnPPQK5lhBF2P7zSCTKPwsKHaWXcj5ABxTdRQolOuEmp7ry+MvY4fW/orUVzXbjsSNFW6LTv9ZJS1LdUEIyB8VZTjzWXzSoR5QHq1aGEn6fWVfvwxY/3cdo4RsGv/zseY2jG1KlQOqd1DFKF4RwDQCCtoqSM/b5jX/mVE69zhw/7O2iNJrKlcWLvAZcp+GFCu2WK2F/WXeHzRhswS/ee6NXmzDns3wp5UnnPyBt9vWoVpksagQ12xLBPSxjsLSdADgkBGN3BeYpcyKY20lC7Uv9piYT2YsDTeeYDbgztWyop19XaQRhwKiZsuuTf1HL/9nrH/bpivXGiKBrzAADGCHLKSMbi/roNoV/pD3+HugDcmmB3FmnduY/JGb/y6QvWfcfL7a6uy6IPYlPlOnSZrASJWrHmjJgzUi6E6MD9kQEFrCQtGNyTVcV/Rb6vuG4bijRsOePy5oTpPOF0mSFYxsEAHAzHIThUrL1rjAbm7h38pCJRwy7YFnWgLxVpT1jXHeuW8Ol1xW3b8bptuG47nQCajGIA/Z7d4YHGvQ4hRiynBU9vLnjz4QWXZxJtrLUoiY3Euibc7jtebxteVzqYowKTI/w7R+6RllrROSEVpOtf1x3vbhs+ud7xyfWOl/sdr9s2LInSVOCMUxgRg647Ftq9UIH3z+SMund4zJeZO505EvsX6I7EYfLKFDNmRDWUchwui+ZQLSq8nvr9PAVMHaIehamMGJTeVDSpQKnYFeZxXu+T6JU0pjspHLAnDA7NOk9OEl1ywcfvXvArX3uL/+uXv4bX6x33+wZnSMm/zDPOc8QpRnzhcsISI96cFp0OWMDv1mDPCaVVpNyw7Tu2PSClMjSH6BPL18FiHX43jlo5AVAaHUk2paDXTnOX7otp4GEI91l113Bs4KZAMbZzLHh7rUpDL1i3HZN3sHPE4i0mZ3QdYGG8hzir70HG4U8nGINJiVddLjMyrfDws9kHrV6fjm3fEdohjeiszf5ACjCe9f48opE0s9823N7dsN023K4r8k54nhPpIXuprWHPnD4/vq08F2rDotZe0Vk2v6XgZU3DmSRqkdpzwdzJRd4jl/0znfOf6yJVUkbaEwWCAvglwFc3CAn8QB2jqJVJZgG0PWOaiGtflhmARa50S7CWcQ9z8DjFgFMkhTaqvqWLTFsV5F3ZY7o8FHCJ3Efo+5Uf+vVlxXZPKIVCYu88Yogd/scSI+YYMQdqPmAtqqZhWmPHjoWWfYKUMrv+RAhs22ls2lQVzw4ZEDEIPiIAiCFgDgExRN1RGBRQr9Cxeu8sLi6iCkWT4b5hzwWv287JrNXRqb283PHmzRnbloZyHbDjoGuFEeataFS893DEPvhgRc9FsEYu9OiAWitSStiUSv9yveO+JXx6veOeMrOtchk2/945BPXSAwCR3n2rPigGTMuE+TRjPi9MLVUn6drjElZOutueR7JvK4Km8OU58yGkmSx3CbsGDX56XfHpbcPb244tN+Rq4EwYDg+T85i9x+L5zzk8eA/awyLrEY6LwcNH7ls725PwIgYEdZoCzpMHpCHlBIhhAbXUc51V2Pu0zLicFqIBS6QkI+gy3rBRs9ZAOnVY7zEeowZSK82K10TNWvRAgxJT6iiyXu15KCodwwufBc0s2rYd60pST8oFc+AOKup9GUPg+7KH07sxGA1NDJ6ZXdDJoENYyipzwQ9rro558WDWZ6lWmOy4ezZQ7ZwgC3edViea0gJEGKVeGifoKqKZdfQmrBCUtSG3qvfljpQzWiu4TBFTMEA4wZ0mxGXh/SZAqUQI9pTJfFNTAAtg1vuNbh7cr8c5IswRcZ4wnZWg1Sdg0NbLBYd4msgy1lQEOtL0cYxNEM8HUuhHM1J4LuYtIW0JZe8oj4axiuVkDQxt4poybutGl45SsXp3IDCN+11qGEmcSoaFt7SGKREOnIJHrekznfOf6yLFDpJdUF8CDvihExlgBhPJB8/8H+80gps2OLUKlsAAhe4IMMxqdVE6NC3GDBZTLhXVgA+4+poZY1ALO/R9TdjuiVDfnpkmKiycwfuxJJ5jxBwZymeVyln1CR1FqhGTHjERhUW2KJ7O9MueN2UGk8fp1xQCDwMfFOtXsaIRXU4DMBSnAjwgsrLjbnuCiGiuVkZK/MpKiOjaGh5MMqCG90IhrT2solSn0TUV/H7Hnym58PvsCeuWcN8I8625YMukZ3enBQMe7sF1qySj3TzzdUJQNmJk2KHzTjO/dF+nTKasLiKMKWjD4JMO5qLCYMJ9gy6eK+47hcNrKiBb1yA4r/CVR/SBNkjeY1JHAe+6W7nVKbd/ZhhTrHOq0VL9ikD/m+3ZPg6Tdyhe06VbQ7PEDJw1mKIfS/3u+TcF9969fJAJDKD7VANd6I+HjKhBzQV5JyuzO+M3ZWtxj0rE23nVKhmhI0onZeg17Uy+4eoPJXH0Z0d6eGnjWgl2TBFdzMxr8QB52S4ifb9QjolVm5YuiocxY+nfgOHiUVpDEasRNBZF0xOK2vgAJOmYUhRRKMyA2nes+6bpCg3OggxHb2FjgJujFgSSmFJtuCf6iOZWYYiYqxclTZi9P3a3QcMMpyUO4bIog7IXpjB5vbetEn/4TL0Hw2qnwx9FOpx0yD/K4cnYzZSpIzvMmotajXXD5l0zsqy1yNYqyYiki+7WU3DEC3X0g8Gl3wo7KelUZYrRQtAY9cnrh3bYH0GAsmVI9XCee6YaI96cFwTv0MBdVhPBEgImT8zZO6MPC6np1BuwK7i9rhSu+g2XQir8+RRRcsV6o2D33dsbvvbJC969rrjek043HtGfNfvI4wtvLjjPEz764KICOH6YojsI04ThjbrPut23kZ3FMDsyG7s42HsPb6n2niMFt7PCE8GyIxUI3q2r7h4Ih3pnscyBrtpWY+idxbanQXGevcfkLAIAo24Hw6C1uGEeOgw9O+5soFAmHtZWfZ+i8Oi6Y72uuL/ecXtdcX1d8e71rk4ad+y1IjWNHbGGS1iddp/mSac5fjZVBC4GLMtEu6zzjOUUmYBsDVAVX1f3gZoLD+N6hLKVBpRGbU9ydCXvh9ptTbhtO97ddlzXhC1XOBdwMg7P00LH9uDx0fmExXs8TdMwYp10cQzgQb4jMA/+jgOWedj5GHDajZ4i2TdLRDSAqRUpOOzZ6V7R4HmZcJ4nXOZIOMbTBd2Ob3rsOow1RBEEME3Fun13CGqwZGcHnI1RmyAe
bl1UupxnPVAj9YLWwIin/sbQz26ZIk7TDDQDK6R2O+eQS8Nr2ymBWHfGmMwTdV2Oz2YTgfURs2ehvixkPE7eDfKS08ncRvcg1GVjaJRqL8YMjdiRsdTt0Y69rQGlDtc9kVDUaM21lTLMAF72DXvKeN3uSDmhtYrz7OEdE8MxRZjTDHOaUFXb96oN19de7sPV5Rx53rw5TfCek3SYea9OlxnzZcF8XjCfJ06ZXfgOMCLe02OxP/81Fb1nrNr5dc9F9S3Vz7aLa0UFtkb0GYXAWBmsTwrMj2auFmZqGZ3Y9poBAfbHCVqfwVIK/z5wak+uINeK4ByafAtMUvRAB0Vk1g6tTtcsdU5/s0071kHiA2A0tpmH+2mqY88RdNLqmLhIX8wD0izTZmtPBgbEGZjITJwY/Ti0056xbQm3dacmRHUtAJfFp5kL4jdnGuMu84QK7jugHUfwnhYiAmRrkKxFymXY60TftQ5x0Ey99+oPp0VKizKx7o4vk9XW4zqi4++5aJ5Mj8HoXTkROwqKL8uERSFQa4zCP3KoydtDt9w79bHI5oudOEW9gySxZ6Q9c9eYiqaEsmvLpWjxaYNSPRynlVrudJrK2vm66LHMAac5YJpUEGk79HEQOXgQU6PknSFhQq+/AMO+pk+WQI+JkbFgj97BzPwMaORLC5znZcLkHF3QrR2MUKDfj02lCJw+nDzS3vUaNrqA67w1olSm4ADxKCUiF05WhHsNTlNnoB6eb4OyXA2aa2PKGEXKAWJBOnMvUh0OBriHBWg1pMxRsUQviu50rKuMUBcDmEa9kDE61RF+tDBwsMqa1J1aA6Uh1qpTf8OkFkJG4Wnu7TQDSh04hnOITkmdadtMIyyvUzdUCC6DIGGHQXXxDk3JJEELlwiFqLmo5KEw36mJngsGuG87tpSxbgm1ZpLaxY8Jrum9s2oEx5oy1pQYirltel/XQcK6qP5xnGUKczpP8a5RBrFA4XEeY8fZpp9JKx29aGD7eWgR0Wg31ifkrle0OKJumj2mpipQL8w2/AkNDKKzqJ4xO6lwF874Dxm6KaPPVm+HurtHa4JqyVz+LK/Pd5FSQgGnGU2jDAe92Xl9sEsDLLn9VRkpXSsyq6iza4VyFRU9+mHaWMFdiQEPgFb5d717XQkRQADvUCFYloimUNy27bjdV7x7veH1uuF6XwFwj7JMEU+nBR9eTvjozQXzxHE+1Yq91gFpLFPk4hEGWZ0o9pQRQ0YsXER6Z2n0Ct6zMQR45zHHqA+6G1YvIgyqS6Xg0+t13IDneR6sqg79CViklilgCg4iEZfzjJNqd/r3foxEGPqoxsOhm19a/RrRKJXMImN7WjIZRtu6Y98y7W9Spu5IGUNF2J0xq4dWM8tEcsvTedbsG4tc6cNmo8dymvB0noaHnOvCrD5NCOBENTGqR+tsRKcFqTRBqhTj9t1jj3xxlvuD8zwBEuAMC8Q5Biwh4ByYMxR1wW3NQYTJXbcEjEPNVoOgmiqyCVWUrHuVvmv1zuI0BfoKAoS9lZxhDHdXPWeq22i11rinqAatNJ0o7CCyeOdGh94LVIeEoPuTrnsbcTKanVYy91PWFYjudp0hrTpag8s0IZ8WfPR8weQC7m5XinPDXo9CAFAEu+0Zk3obxhjh1AMxegvvCY2PpOU+ETmrjgweKBWChtrKkRemjujWQBOQHabIFYBpDdFZjaqhUXQRwV7J3twydYOpHEXqum7Yc8Z93wE0Zedphpf3PMBbQ1pJmFqVmHNfE15uK229epGaJpSzNiwKbbI46Zdnjhgenk0Aej9q0nI5UsQB3icQHM0hu230br2LbNFksBhj8APibLUii2DNbBJzLiw81gzD5A7VEiZsD9A/t4rt4b0ahQf6uqRHnnyz1+e6SK2pIOwFLlWILzgVQWgUpvX8EiiunHLFbd2x3nZcr6vGq3cHBKeZPhbG0gLJegs4h72x+7ruG7sAC106W9zWrL9mMF8WuOjfo0JnFZeuGzHrdd9HsXCWHfc5Bma2GIOS6SJw3XakxolBLgIbI0KgxkB0aZ7LBGsIefVd1AgndB5mlKxOwOC/WxhI3lFKwW1dSeUthbY/0vB6v2NLCrPoTeQdHcqn4PHmsmBZJjxfTlimSIsmHNDd8MuDkCSh+TaPRWrcyEJmI6eogpQKUqLN0ab7py0fOpYqGk3fdzPKaAqe0fCTkgr6Q2KniPk04fmyYI6EliAH8xACdXnuBrNh6DpWY4ZtVa4kf6Qm/P3GcIlvDD56PuM8R2ypjATkOQQswWF2HnOnF4vOTqI5V1WwqiZNAHjPKHbfehhlj5A5KOGdztCvrXecSUwLaM2NSBTo8r3vX7hP6XAiv8SLFhX1WRusM331KarvpEpRX7wK6/RNG+gehM0hCQtW/Qz52dhGaPaDywkeBvlLO27Lhvt9w7on7Lni0/vKpGhg7FcNdF/UGooAvjRIlKH9EiVLdFG51X2Mdfo+DNCsgc32kECUqkm3PGSfTxPy0wl78EjRD+mJ1UZnbRU+MV8NqR+8djQ3Ay0QGQ3O83LGeT4h+Ig1FeTXO15uN+Q9Ia0JUrpLRDctPtiHPTQy1QbX9NrLgVJ06LzmrG9BXSpsgVXClqhPHguChamGcGdvOGpHPBranoFc4FojI3EiCpMbKfX3UiCFhSo36hGDNQgwvL8Df+8tJT33Dn/NXAv9NQu1jA10U+/oh7PM2vosr891keouCTEXuOIHXNcfaKA3gmrCuhesW8Z9yzCNXmdeK30nMTQ5/KjEqBVPKXi9r8itoogMLH3ftbO23FsUdaaG3s6tHVEfZPEUsg1FDUeNkjIMGTel1MFY3Gul83SMqM4B6pDQxZizwpQM+yM0NTpKY4dZadcydGv9vuwQxYtLLSiljKU2xaFtEBrYFbIjP00R52XGvJDoEZTV02+1YdypMB8MBgzTjV0JA0E7L01Grqp0130QF+eisB3D6NrozgbCMV6ikKLtE6VwnHBTUDabH0mpw2C0HYXKWTWXDQ4leBgB1fZoY2nep7gG7jU64+40BaUAO8KB0CnGd4IEpQ9WoTYB/fSKYv0d8TDOwsn7jht9Wd0qjUh712yguypdbntHtprYHnRnRrfd4RUAeu0N0AxCv5a9kfBaqPr36FCoQkLk7/Da2dppyUYdR9z4O0bInUJVLHSMGjEi+OjpjMlYzNbi1VqsLuOeM/ZaYItlZDz0OndoqDYYVJTKxqlpke7ao2FH1u8zq+a/IDOwdU2i3pYU4DIF+TzTBJZmwkrhNgauVEwpj2nNJ7qZ+0fChhm3uU7intOfP3Ztqe749OWVzdieMZkAo+SW4KgJdIbIQEd5cq0IjcL2fn4Nmy/wWekNpDGV2XgDndBiBMP1hBWYxmtp+lRcVbhdFP4UUeq6gfXQz9wM5/TWPw/ggX3I+y7oRNSLlNFDt1RLkomhQXA3D/COe2RnLXLX2XyT1+e6SL3cEqqsKJamm8vzAj97XDqMoofNtmW83nZ8/OkV15cVr5/c4A3gDfUk1hDjReu7CVqAAVSR37cdv/TxW+wpYS8Zl9MJc5z
gHSmzLjI2gM7LoGefHKyhrtYGjr8/GMMocQAoBbUapJJR9oSa6AhtSkNxGyMmmiAYLrcX/ZBP8wSgFyhlAhpgTRn3PWO/brinRNzbMek2ejKkjHbiIhYQZg15a5AyIy56oYoqCHUni2WZcLkwtmNZJsSJdO7Ds1ChBWNhKw0tpbL/N5Dx+5zxPHibQDKFs31F1GCwN8FWG7ZK49+q0MEoTtLtmgrsngBj8LRMQyPidJ8YY+CXig2NUBcijRNcT+N1el3OU4RphJuKUmdr0j2MANUcbtXOWXjhVB1LRSgN77aN+Uetu8FbOG12XOPPVhsnqL0UXFMGWX2ciOEwdkQAtTx0tcjYK815GTWjHa3QCgm6U3oU4fYdGoMHefAbQ3h7mgRhjnTWjwF+CrDRwU19n6LTQpNxkNVcCKnrP3uWkVeqtI+BBUPZlPR69LwnBPAxIO8LFu9xf71jfV3xyesd122n6emVBW7bWUjn6BFd0Ime01gD0AwfIK+Mt7hE9Y/s1kN8bm2fDKdAwpM+49byOtF9hfd9SXRT71ZRdI+ocEuEnxzmKeBdoCGs72iACFJOsCIo1WMOEUuMuMwLlmmGcx7rnrDlhK/8ytc4iZaGL13e4BQnfHhahu2Wt2z2cuV0szdBcw4zgJgKdXNJySAAHeP1fGsCzW3Cgat9w0v6Q8MClTMkV9QtAblwraDmxWIdG+RUMW0JqSk5xgqabYSarcUpsnQ0sBEt2mAGq81+Y67bdUvYtFme9Fk8z5OaDX8LECeKhtulXOFT0WnlcD0m9ql+b+oosO3cdRQDBBDP77YkBtQideX3+2F3ug9ATw7lwWYVO+4aKubiUHme9f3wYCHm39NSu0cfhHRmgLCSCEZAmrMWs7PwnTpqeAjx7yPk6KyBsXQM79Nbk6wmpAl3dWdw7ghOg2LmyzTBO4/gG/0PfffT1p1Op6saMzpkr9ojH/0wl+3+fc7b0W0bXbI309AJzaPLtRYADS2ZIK07EzFa3A+Ci1GMP3oyErv2RQTYEx8wC4M9FwRvUVscRfEQFGPsyUw7ogPkkSJv+i7ADtlBbTIOrQbowS/YazsIKNK/NDpEYzaahgSyGTkiD3r+la4CYMbEeYTEOct7owmQW8NaBLdSkPUPBGcwe4tF79vjIT5YarlUpewXrKlwkrbsbosIphOD86yjK7YPHi6Esac6ihRp5z1t1wZH2E8PSa/7Eh/9gN28Uv395A/POk9Rq2hT4IxBFoHxDm9qg1hKRdZER/M5BgTnR8ctAIx1WBY2SMtpwnSaEOc4DI7xMCHrW2SBcnRul8giHJoSYPrnkgtKaeiGu7AqvwgkDAVtSFopFMBq8Z884f0jeuX4spZQGz+V4/+6tOU0dZNoM/KZUmkwrcDUBh8DYA1yrvCpwrkCETLvqvrpCXgvGwjE9acI7xcrRRUO4KGP7noJVObQn0uxlg18k2GI8BhDNJ6VTqyCQXS0DbPN6FkKZBCubFANqQGbV939EoX5FjCYzbkiO9rmJM8Y99q7cuk47yGeY5FiMqsDO2NvhP5z7hDS0aKEF9s73lhe9S3SZKRnzsHDeg8fo1rw0+Io54L7fceeCnLV7sPTRXxWtXrwTlMtabzZwCA7MUatmgg/LN7DG0PKtN4mIoQs2IH3IEFDmxddoO+l4LpteL1TvxFCQI0HiSKEgPNy4q6nCuYYqYsAADkmG0iH0txwiPDqgB4iv1x0I2frkVJuBGRedhaZFnzTYaUKHAVKDS57kVJOknUWvvHaof92fZ97ymPHte4JwXEC6mlq3XuMER9CvN60UaD6Dg04YAxa4PBBKo2HeKedp9rgbIXNOHYJ0OW4al/WnLHvGSUG1Now6/7E4QHOEaCIdqHok3tnnHVKtaO/XGm4bRnv9swIDCh7MDqYKWBydhRM3hssUnsuTL7ddrxu3J06RxFybg3np4VFRGG5PlEZRwgPHRZNGqkSG0yw8KUCzoxi0OE2HxVVCA5hCsxbWyhOd9YgzAEtF2Um8h6sAPwUkK2BnzzmZcKeacN0miN3rDC4r2VArU+XGeczUwhOlxnTeWIwo+okW2swpQ6t1EikDQ6+T/PmML913iqxQsbeFAptzqdZLdECJjGcvpMKvlPBHLwaC/tD+zb2rwp9GmUxWgGcUb1awKVPLsIk7VwZWtpAKzIfPcQCT6nwfXYdqCFJRm9aNlq9gry34umHoOl05vFn+oM0cqWAoc+E3qtetaW9SPU1Qcc42cT34E/uHksjOmUN4WzAaMPZdZiKQniLKfrfPBf03/pbfyt+8Rd/8Rt+/S/9pb+Ef/SP/hF+/+///fj3//7fv/ff/uJf/Iv4J//kn/y6v5dVU00rAgc5/K9w4MWP+wfRA7Q1HqDFALUQz63Ae67QRhiBMDkHEyO+9OaJNOjSGPUQIiZlHbmJTCPbBG8/YRLwy8sNb9/esd4TvHE4hYjZuqHBmlQ0u2n3XUWQmqgA1ePsaW5Lirl28eWAMZ1TtpgzaA2ANKxqOPrpbcW764qX+4q7WggJSBAAwLBC5zDHiWygSrt/EWDPFR2g812HtMzsXpcJ0zwhLhFhoRLea+4Q6f8GtumiWifIDkfCsGMjXZaNQ6ed7xsFz0mp56Kfj7MWsyfV/TxHfVCMso0q9rSOqSnlop9P1UWzRXUZ1Vq4NcFOHvAWk/HHzke/BuNT7wGv9ORUOV1zd0RRb/SWlkRG94rejv1ZUjPcWtQuyzk2SYaat9pwiLQdI9m5zzBj7xe8VwmB1bOFzNJtT7iniiSCyVvkEhCtgQG1dv2HapWuBqtqjj69rXi3bpwKnVUySsHpPEOswSVXuCY8CMzRUfP5Ee6p9EL5EtBqg49+7DA7/bsLSQl/exrbhh7iyd2ReEcnD4W/L00QlgnhvOALlYSZKmzqpskPF5LbbR/yg8t5wbLM+MKX3uC0RCyXZdgBtVqHi8KjR16fAYzuJV3/d88psntkHmGCLMJhiRrz4oBcsavFWa0N2R4u6dGpaDtoOKH3MNbBW4/gBKf5rAnBBdaFw7rJUHNZCzO4bnvWtYDwPIHBtibe9k1ICEF3mddqADa6HeoGBCWXMfX0ENUY3LBaMsHDuAZvJ5hcIInojxQ2QXTF0H2wqN9nZjoCnIU4i+QtpsAp6jzZAwFRU4G8k7XZc82Yjs20gPqe8cA3f/1/XqR+7ud+7qj0AP7Lf/kv+IN/8A/iT/2pPzV+7S/8hb+Av/k3/+b436fT6f/R9+pF6TDdPEZT8/W/WXSnIYNSQZh2QDANVnRXUxuaoWjOgvDbEnmwNy+Yp0j/Ls1Z8lOkjkWAdWU67vV1ZUedq5IPDIxzWILCffr7UyUsUxoNLydjYXligNJLM1yqU1frQxDUr640Zj40acP54LYl3FNSZ3DeFBCMA3FSd4t5jqTa5nrEVTQMSKrrq6agy+MB7bkRG2CtHfHV1lq0vlmyFtYJs2v6MskeHnuDMptJmS25DIowlHxiDWFMJ4bXTzGKKocFVBdsdoIAc4l0N5Mr4BnrUTKJLSHw7+4Jrn3abvKQ0KpEjN6gltoJFA
25WGRHooWzFlPzaIb3kZHuQm1Gw8N7sTthmAOuBODc4585Em+H7kcX0eybeA+woRBkZwf7sHfH/UwWHAv4vZTBPLTVwbnMYrUnTCmMovrIuOSR8n5jYUDiS0+8tbXBlDZ2aH0POASyFuh2RN3RQvp/724K0XOfFgNmqD5Qp58Q3dDyxBhUHNqwLDPmKdJ8d46EG5UwwetBOrkx3anicNUw4Pvohz1JImbA2j2/jDopQWjMTZvniCkG1FzwGGPSz5SeUhCcH9OH6Tgu6DDTrACmk2a6hpA3WycHJS2ynRxRit63riLbMuDDKjIISV7Ztc2T2iAiSHsejYDvxBIJEH2fRslVRkhssbpPFyiJqT64RdRuZs0G2DZaJeUaEJzAeCBaQsBNdAVTDySraYEaWi4cZJ7ftCL1pS996b3//bf/9t/Gd3/3d+P3/b7fN37tdDrhO77jOz7z37nvO/b9MCN8eWHMxSWyizxPh5fZHOkSMabTRtqwh5IVnGUHqvdIdw3f1aHDGJItxrJd9R7nMKF5XthJGT+n8wyn6vCtMErg5e0Vr9cVn3x6RdoY6HbyAdFZzN5h9sTZYax2KAkvKztFWIeTGIix2CI1L61h2PTcU1+YymCQBb2h1pTw7r7hvu/4+OVGWDPTZdpZUrOXOeJymvCFD59oOjpFdRrPeH29Y98yWrsPaI4C44Cn84LzMmGeJ8X/3ejk5IFRBTu2Tyqo/Hr7Gjs63ZIKxc5dG3XbkLcdJWWg1dFM9D3aeY7K4LIoUIq57godoG4J9ONrxgDg9QsC2Gmjt5w18OEw7uzkgjUxm6jkCvRpe5APKtaUSKWtbXTiUSHbp9OkwlKLszqWt9gwB48lcKHuLDtzC4Om92BrDa7wwTbAyGjq/n1er2cR4BQrTtGN/epICdYKdjhEtNGdNdWsVHU1KK271dPi6t0rP+fny30cMA0MwgtV4AKnExc9rZmchVTHw0y4F7G2joJqwwHr9iRipixzt0XrHXpa1trQIGx6AvczbgpwCh2z8OlnWSrerPuAZqmL8jifJ6IY3o2mUwp1gKRC81etNlDOM9UWCl1zV2UhRrVfRXVm+hl3oo5YBxM4hVdjcM8Fr1vCVb3rqgiiJxngvEx4Xmjma8CJZE8NuQApC/ZUYUA/zMl7DTX0uO0Jtz1j3Xb0BFwmN1SkdYdpJK5UQ6ZdfdgXSaOuE4oQ1VpxfV0ZW1MKYVfv8ObNCfNEo+zgFSJ2jlZWyiAVYQZaKhVrKni973h3XfH29Yb7RqPn1VkswRFmtiS4nCJTBYw1Kh1hvtewqjJWQx4p8u40+WGA+01ev6E7qZQSfvInfxI/8iM/8jB6A//yX/5L/ORP/iS+4zu+A3/0j/5R/NiP/divOU39rb/1t/A3/sbf+IZfP58mLPNEB4RFQwj9YTkDHZ0tyNii15lDjV5FbbqzaIK9kAEFHPb0S/Zg/pzGc8CoTqcLIHuom4NRjUDLFS13ZTvV9tE5zN7ipFOUATU3tR2GjaUJvBcU5zSkMKNWi2IIgXSvwL5sr2Lp5VcIM71omu5937HuOxl9wNBXXc4Lns4zni8Lnp9POJ+pd6q6r2OOlsH1vqubAn3eyDzSGwz9svappT1oeL7upVPD+7CLIkf9D/QDGuYhnuAQA4+0VmW79cC0qJ/jpLBYDHQLcLZrThTmrW087LVopEuqw1EgJU4Zq7pclD0T9tRiUPqX0tCzGoFWmAGJtcb7i0F4cSzvp8D3N0eyR0UPgqbC2Nrov1b1WhyTlxkLbWcsopDNeZnj0D455xAjd6KT7hih5qEGh1PI0eG70cWKTt2coAvSlrCr5guWRarViNg8EB18dKMT7sve7lQgpUL8mBfQZwepSiLvDYPpBachrTuK+liOKdB0E1kLr+7sffppxmDqE57ImHSG4/7Abt+XFoiiB9IabOtyCha/frNxwiVysG4JKfGAht6Pg/mnurZUK26JUTGv24YtJ+6FQ8AcaFW1TGE4mzQ5JBQjCVyYWnubokKDDVtKlH60+uBsT4btnvIgwlT0aUTUf1TjQBpZnlkh9NfrilxolNx9So0ByjJRcjMF7rL7/RQJgRtHYg1qPUI8VVTf4XQjBg7ceXYxurWKZnk3pCJzZA7XHKhzEz0LvdGBAT0s8Zu/fkOL1E//9E/j7du3+HN/7s+NX/vTf/pP47f8lt+CL3/5y/j5n/95/NW/+lfx3//7f8e/+lf/6lf9e/7aX/tr+JEf+ZHxv19eXvBd3/VdeDpNOJ1mnM4T5tPExNXgHiKOCac4Q8ruHBxaJMunmypuOtKuqRBCQk/TBHIgLmw9D4yRAqQdrFWXC+cdbMowQhPJVqnktko6mD1hvvMUlJlmCMW1RluVlFFFMEGQnUMpDtuekXWJ2X+WIn1vQhNMa6CTQMK76x3vbivWPWFNafy+eaI10pvLCW+eTnjzfMabN2dcLgvO52VMUiXRYyu83EeB8LYbkvaEYjNG+D6BPgoa5SF48vEA6kWGPcOhT+r/6T0mJY4iWAZsbA7Ks7Iqa+B0GJS1GKyjm0T/HiCjrwtRR/ZYqmiWzUZnfN73hLwX5D3Da5XrljVZO+z+1ScfV3mYiH4O0XUrLTOKVnC0KjLgkEM2IQtnqVTpG51MnKbSjunIqCO6AU4AcqvsgBW+IUMsqHedgTTCf/1wd5YMxZ7qLKDjt9EPosO8+5Y0Gp5LeR+9Xmv+oKEJjDvuwW6Uymas8J4YhYroxNgNKVtVREY6c94P6n9frfBewaGtsscvdv2bSN8g9tUk76mmzSDfIEYzIK3TJwERjXB3lpZNvZkxvK9KrSOu5b4m0DPT4jIHoOgE2Br2wkiO13XD67opo/QgRJ2niPMcYL1H0/c8fCArXRukUJ94myJCqAh9Us/ULFrjYQwP+1Ir1p0SEufsYMo1gA7/3mF2jlqFUrFr/M7Lyx1JYV7njmmz5opg1BYpeu69rIHzdI8wxY4dVWkNe87Mh8oZKWfmyzXe33spyK2qNVi/39x47ucYUBuwhqZ7KbKoo6H7SjQGn1HL+xtbpP7ZP/tn+L7v+z58+ctfHr/2wz/8w+Pff8fv+B34zu/8TvyBP/AH8Au/8Av47u/+7v/t3zNNE6Zp+oZf//BLz3g6XzCfZ8Q54nSeECd/EFhExY6esQeXJSK0hlgYCpiSRcq08uGUwgen+Ipq7PCi49aHh2ytFTWRYu1zRnMGTk9mK0B0BpO1mJwluxo4nJdxsNj2UgdEWFp9IOcwwC6lRPo1uG/rVjiErLqbtsWemtJi6RfYPdSCI4zy4ZsLLqcZ3/7FN3g6L/jwgzOen85Yljh2UsFbXM4zWmtY5jAOmynwgHMGNJPVA7ZVq+7Jh8sEo7AfyCm9qxXCMBAcrLpcFct3akxKt/Kc2Sh0Q8xu9ZONmt8qxDd72hu1J3bWPYyuNxeHw7bqs/jBQbRQwainmpp9XjclbWyZLE0IpFalbfNaQotGhyG7AXH3fwueehpvLU2JLQ1dvUZiQI4ilRMPXGcNBBrLbR92Srpbc44swsk7vDlNOLeI5wsOtmhn9
jVtGqBCV3RHDoq+50z2m9McH0bRsAFBYwHhipYsuz6ctCYsWq3xe+wZLVek266u6AU+VjglgHRGXU1laKuawtE5qVN/OxoYEQDNsHEwieLz2ggzei78XfCwXv1TtCl49ImsfY/TCN1zj8UC2X0kO82euVgObgq6j+I0va4Jn7694uVlxet1gwhTneubE6LlNe4u6VvKuKeE276jlAyRAMxxOMhElWNUhcJqa7icJ3gPWNuQ9w2lZbzc7wPue3dfKV2o3UrM4mXb4JLFy7oNi6sYqI8M0atfqUHT1OKagPuesKWC620fGjlrBc5WvFoDKW1oM00ThECCh5sCyRSlwpcKVxvcnoaLxhQcpHmgNZXfcLruqwh+ZoIAwBsL44HnuTOegazG3cEaTM7irO4wufwmw32/+Iu/iJ/5mZ/5NSckAPie7/keAMD/+B//41ctUr/a63xecLosmE6R2h2dovo+qi9trWowgqeRZPGkkjcnDwXtwS1B+l+g/9Rt+oDcWgOqxZwKjHOYelcIHiqdyNGbuT59lMYOtbstV2kHNXPsWIw6Iyj01TtjZ3QpSwFln3BqLWMKGTRqFSrOE3dQz+cFz+cFpxOjyXuWUUdx+qEXFEITL5Cge68+SY190yDZPVw7nVrsAcsMF+oucFYotO+koFNmUC2KARDngLD7QX7pmhRjmA4cikMLDKcEjNLmu46NivlOgGi6j+zQz9BGtTY66P57axPULmCWrpMz47pEMfTQk4N8Qxit60iUNaY6kJ7o2qUB3cFc1/q8ZOa4wx7h0L7w7teR9xSZmc4JH3zVc7nxZ/KYbmujgPyAYA8YsU9X/cuP3VFDNQ8FPBTU6FC9RSsFFY4wZ2a2VM1l/HP4AmYHqJFzzZU5VLmiab5QyVV/LvNAoOnPm0AMzUyNs2ieZBtr/SBldAiw1UO0LFL5uXZPwdKOSb1/1k2fq1pH82O60BfQ2JuGtGsy87qPZvRyiiySbmwBB4mpw3i+PfjVfR1K0E2AT0uENQJpBVISqhD6s7XB2Io1EZ6voquF6iCJPoDWYAjqDQzgHaJ4nhf93FJ4vPTdaukkKOkbD3UxV3JSfZBedIq+3o/OkyDVG/tJ8/Q6ROKAYR01rodek04ksdpYQTDeRxNBdBbB0lIpWEsj48/w+g0rUj/xEz+Bb/u2b8Mf+SN/5Nf8ff/5P/9nAMB3fud3/rq/x4ff9gZPT09D9R5m/pMv3S850mB7AF7LFSXkYZVklUo8IjGMQbA86HkUYsAjqXCZmESNE4PDubYhUjRgmmtxFquj/1dTMWZudI1wWoB2ZfQB1E4YYzTmm8mpth8ulqPzHD07dS22PUvJWR5953tAyoTsBMAyR3z45oxv++gN3jyd8KWPnjFNAdMc4AwgpWJf+15J0IPWnk4zJs+lf4x8OHqaa4wOQfcUXjtdFlR1iK8YMQI9P6gkWvx3Jp9RCNYrpj4tE0QC6ili23bUWjEp+QXQOAERrMr2887ivOjy17rRUHiF+kptg/ZqpKEYgU8RMRcWudJ4SAHDUZvJwGSXUcpgGTio90yqh2MIgBG45wzpyV4nLXpOj3XbmBjYPIB7KANsjWnHm8KZVotrh4Vc0dFLGxQoXG0s84k6i64vn++Veqo9ceGdS8W9ex8W6qKayDjwlikwaDMEejk2koVEWWHde88IkPRzkuBQ9oyWKtI9DVKDEQPrD5kBzCNhgpT4VpQ1pvgek97ZFAyj2lRgLI1qQ/SwZhluJi74QQhxViBNJ0BliK7XVaUM9LQzQkjLQNTSiQfxnCvCFCj21UJVUh7ZZftKEk/TqT/lwp2P7gh70m8/nGttKLYi1YJN3c3v24YoEWGecJ4j5ilgmQPWfcN1cvhaq1jXHfueaE7dBC/bhlwrjHUjv0xwoAKLmhV7pcczxZdELA8BqiBnarhqYpSGrtxHpletorpBjJ/HehryQsW81gBhCphbw3MTfPGjJ8Tg0UrBet9ZwJUAtIQAZx1KA3JpsIbkkw53n4LH7BzOMYzPjihF3z0Dzvwmxse31vATP/ET+LN/9s/C++Nb/MIv/AJ+6qd+Ct///d+PL3zhC/j5n/95/JW/8lfwe3/v78Xv/J2/89f9faZlwrTEoQWwtgeCHfTG3pkeVHMyunrk9IBF1JgUEKandvgIvCFpFss01r01VADTlmC9Ywy7di0KTPBGbgzLayKoKhD07iAgOMMC1SeVeYo4zeonpjEY1ttRuOYpqOjTEMoQpV83wfN5gYAQHazFMkd88HzG82XB5TSNqA5SW9kt5t4BNuEyO1dMqqeIjjEmwTvMpwnLaebe7zTBx6A5NtzBMKZGUNENJo+dB/VPhYUqk3jgLDArW490WEfD1OD55TpZg/dS9+5zfc8TAoI3I43X6OdoAI1WoCWLFItQmwquCcPMtVEVHxzmOaCK4HJZhrN6NGr7Mmn4o8gQkvanre9l+hQAS38yU6q6SbThBSkBw2E6VXazr+pkf9vSsYuCYKpk8FURRGXjjX2gTookmZgRwMDdBWnCW6K7RMoFtz2TMabC06pFqjdOos2M4Ng1San0JcwFIdNdvDcWEEFNlVOUFsdWBdUqVJ2OItVfBqTdD/ag3icCEoCGpk2ElPU+eYLWPz57VF8hO44do5JpWi4oe8F23/HysjLJ+bYNRGNkZ7Uuh6DDRUyRuy9lFpqH6aA7IfA5NWMidrpv7CQErzEhTY2HS2247Qnufkd4YeTOWYTojuPu0EqDTAX3GIHSUFNFkabavu6XKXCWsDdMjwkiianhcL+PCsN7x5+xQYBGbz4rZLv2z7cLxb3twa8q3NYmzOlU1Jo643iLSbOpPnhzhveOKePThnsMKKkCwoLzOFEJjqmtn22dsdqnYN3Ojqmss2y/2es3pEj9zM/8DP7X//pf+KEf+qH3fj3GiJ/5mZ/Bj//4j+N2u+G7vuu78AM/8AP40R/90f9H3yfOHlM/7PQBGbh+L1J6iLemyZvaaZeO+1uL4FiY+rg6KRbboQxRmC8V2sxslaP5vCWEwCIVO/sJ0GWtFkX9XsXy+8d2FD9rLYLD0FYsM2MnLqeoJrZ8mPrYPSk9Fx1GK11XQSKGMcAcGTEyLxEfPJ/wfJlxWmZ4f1iq7JnO42vKSnwAPChqDp6jOLr9UXBYTjMmtaGZlxk+0q/QqNVBJzqM0D55XMxnrPc0KN70ybPjoYftUJQZdNnO1LPQIlUqGlRvZi3Oy6SuGf6YKKE7w0ZW0qaUXRYpBmH64FBrHUnNs6r+n54W5ghB4EGrofMcldSh+0QuRTggNKEGTsXIItBoAwNbe4S8Tuda9ODpfr4VFilGcNMVn7BbQy5Od0tAbR7eGojuHp1wUrTACEIUnk1knhXaMm2Jepb7XnDfScrZdNfnjEEJXieYY9rr8GitDWK6Zq3Ae0tmJPgbmZN2FKjWYbW+UDVGP8+e7dS3gr1IdWiIXzmX497RAtWLcg/MLN5SZCoKE+shJ7Ux/mJNeH1dcb+ueHm5K0RKRi0ZuW24nFRjMJeGMHlEYYdPeJdi1/7c19boiqFwbndf
8J5NVPAewXlUx4ypUgW3PbGQeI9zKRBn8cZ7xMiJ3zYW1lOIkNCw2nTAYapRtCDMWGzVc4xvsB8r3vUcMUoegjarDB9saqpLNEBASLkLeL0mBnQ7Mz8FlRfYQVQxCm8D/PdSSfmvqeIWAibnsa+J7hyijcADKapPxaOpMoAxHQ7nTdRRFv5Mv4lF6g/9oT80isTj67u+67u+wW3i/83LaW5UL1CP8AjAhyLvhcGDtw2vrxvW24b1to8clagWLd726GPRLoWdqzQ6BpBCStf1vdANfd0TwuQpcvMeTjvHKpzWdjUSbUInbIAFESBMFI2HgOxBHxw+eD7jzdOC56cZp/Myup6gkMvIIso0xCy5wk87plQQp4A32uX6SJPP5TTrQW5x3xJyJub+cl2xpYR3t23g58/LjCUEvDkxETUGj6hT0/LEvd+00G3CqpC3i/5qZdZOt8AxhsvwtGXcrxterxv2Xd+vs1g06tpYg1LKIRnQBy1Yo52aHeyrrdSBfS8xQGbSXPt+xSjUt2Ye2GsuSKXB+YKie4gG4PK0wFqLBQZnddHwzmLbMtbrBqmcHKY+yVoMyyxYN6jBryrWzrVHajeUtA9WYi9Sb04zJu9wmrxGzSd8fL3jvie83lfdZRm8BDprTN7haZlxigG1njFPgQXTP1QmPZR6snDOnPKPIpU58atmZcuZnTJkXK8enBmVsUjTEj1kXEGIBd47GphifDyoVZByG2zJzib0Cp91vZwuOyHCopZSGQ1RJ1OMX2sykBALALVhs6R/26vjM1SV4IHjaMup4H7b8LWvvsP1uuLTt69jbxgDBfPGyAgQfM4Fp9MMBI+zABe1Mpqix5unE5yxmKeIUhu8s3i+MDMtBIfTaUZKFW+en5Azgx1fb04lChWf3u54t2542XZcTgvuqUCMxRsAb84LHWbihHw6IRqHmhu8deOnqTrxBR8RQoR1XnfRDucl4jwzHueyRJzmAG9535RaBmLTd5iT94gA/RADG7/zianhyxy4+50D3OSHKNiBPptWJQ3WOzzBIE4RrTS1cbO4W6eC+wanDV3UHZM1X1d2tHmRVof8AXK46X822sTn3LuvK83H4tliLIm7roMPCZXbfXFYNGjM4XAIgO/CXhlLxA6F9AvbGWtN4acBe+guazhYK5OvfxDSh2/Fgp3jzsOoWwOLisfzmxOenhY8PS04XxZCX5F7NteXzapvyb7AdbGup2Au6v7GRwa3+RjGBHnfduxbwvXGSPZ1T3i5bywuoImtaYKnORJWck6/dMlsVEjYNTC606ua9WSgXZg6APRuuS+msx6m0jg59D9bC9NdmdAqoxPuh3e/6fu+huF4FbEHQyoEIk1gdZIT9MmZsBxzqvjV/R0Bull4Z3E+zzpBsoNHa+wSe5GybhzAtTa40rDt1LYZRwPWztgshRYy3f8xOgeRhuAMl9cPzc6WCzF6GKAKdmeR1Lqq1kZtlKVDiBdoo3Pckx3+OtwyZHSzvWsVLWi9SNGdQ5fZ/QsYJqdGSSYjuLL/HcBABlIuyIl0cmP4eQV/WCqJNorSoWBp6qnJ983GhvdxL1KuWcAJIWEDpC0j1wpYcBIsnMxpeabQbuau5H7f2ITeNjL8IEc8i+6jnHcwIaDC4Hnb4aPDUqLuNi2jb07TgO+s43XvcTTOc689xYh5itinjC3taBCkmsd+rSn8e9oW3m+lJwFzvzvHCGmCZYqDQAWjRcpZeBcQPO3WrKV11mlm8vRp4fcOgUQWKwcpq+o0LIbp0tB9K88PsjxpQnD48XUYrsO0/bryq8OctGiKGjJZgqf2rVYyj82j08+BpAAYRsQdA3wcXL5xhPnVX5/vIuXsKFJdjGi1MwCAOpaGlSa0+lVypeu5I30YBjAVkEKIjotyNRO13CH1h308/HoAVy1SnYXkpwC3+xEiBxwMOucYxb7EgA+ez4gTIbX5TCHym4+ecHmacTrzyznHCIL+0DcuoEufpFJFXHbUzAOjj9xWcfWUC15eV9zvOz7++C1u9w3vXm54d12xp4x1T3QYdx5BD8oPzye0iREU4kil3WtF2TONVHMdlOo8LHVIBZ/mgGkh1j1ElQoT5Vyx7wVNadNpzwjOIG15NBslFyan2sMtuptbUgnPm3vNCT44HqqGxBhYGbh9L2ykPBvsqWDdEoLnxBSjh9QKv9BUdV5oBpv3jLyXwRYz+lDRLJTvgwW3IqaCIpzmTamoENx2xolfb5uyLGlWXGpEsIRkc2ah6sWqs6K2lGFBHdg9JZwirbeqCKYpIjThpK4/GwvjsVNUGqkaI+skaimetGNDAS2KfddGq62HVQLZfr4Ovd9gb4GxNdue8fZ1xb5lbGuCA3Vis35WxtGYtxe+Pmm5TiCxhgWqNuypoOeKedcd4A1qcbxOjULqdc8k5FTRdAA2ECkX3NaETz99xcttxVffvpCO3Rq96nQPYz1d3m+l4rzviEuEGINpivCOk/RZma/1aTlsxMzR9ITgEKeA82XGlk4UAO+7hhRS69SbqAZgiTPu247TPLGIO49pYUMTvUMtlanS84y98s8wL4472aCWa9NCMtU8Bbw5zWPa7vlSFSRkpSYoAJrhLjkGNwwOvHeY5ohpDlgmTla2J1T3n9GYsVfqOXh9bxRiwDwV1GUCSkXNFii0K+spEu6hwI3XQ4Mzfr3/i/mG3/2rvj7fRcp22w0AuqgzWnwAsCMOnFKmiXsdyRUS3egCYPougyNzD66DWLSgOwAlK9BSyaJYrq1936U4xhCE6DGfIpZScDpP2GslC0sfwik4XC4TTvOED75wwbyQiLCcJ4Qp4PJmwTRTvxSVJGGcHQy2VgUwDU687hQMfKZCvtRDo1QqD9L7uuPl5Y7rbcPHn15ZpF5vuG2kvIrIcOQ1wFCgN+H+pK472gaU18MNvmdZGWBY5HSzVGcNQnAQT/jGBzojxOh1cuKB1m1yaiFkBWcYa67EE++6Q7U7vOAeDrnhcefssOzpfpGEus2YbAGMxb807vFaYXMBUSFxoPK/U8Zb4SGi47CKP/XvtBUwwDR7NAhOpwkihIMFbIj2lOgaYK26zLfxsxljh0g6OodqFP5ojPuolUbJ0lj0YnDYS8U8pnJ9tHvnq52zVxgvO4smXcjL/UUuHk2aEkJ46IkxyE1oLqolrBNRYrUaikdXiWYMMiq2LeN23/HudcWmpsDeMDi0BjcmqV1p8FW6u7vBrGms3jslwxxyjlFjYWBzhSkVbQeFpKVi2+l2b8F9MenzDBpNG5ly+56G00quFT7b4YLQGb5ZKlLJeH5aME8ez5cFpzmMCBqvh3St3fqpqaC7jIifrE1Z3+XVxv9ea2Hwn3SzXxlLQyO0vfKWZ0Rt9CMkqcTCSSDMao36ZQYsS4T3PE9i4AS0hDACUrshcoMWGEvDWmuAOEdMU8DpPGHSIhWjR4gkhlgcCBPj4O14LrLuWov+7KU2egEqezl6O2A605Tp2ndN0ocyGaGcGPerOW5cfZ6M6yKKX/v1/4MiZY8HVjty24uUAaMDgkdUGrUkDwlOl4wcCRp6EiyLFB9
+0kAfIxy8tbRKaVwKemXNOIXs4uQxLxF7KTidJuylKFVYSLdWv7/LacKbDy5YzhNOlwXLZYaPHstlUmErWTd9IuzdspEGAwvrAFtp4GqcGnFaOg5U8OHaU8H9nvB63fB6XfH2dcVtXfFyvWPPGbVR6+N1C/84+jfQ1fu+FXW0yPC6J7poEq3vnRiob5IqCNGrlVH3orO633LI3qG4OnqnPomSrqxxAKLR8M4d7tkPBcrYDsEd8SRDU4VOAtCD3PRB1gyyydBMtSPl1BgDF5iWbJV5WV1Fj7rvdDGD/h54X00Ts22XJbKDztzLtUbGlhgDOPtgHSX6fB52RdE7lAJUcLKQxkNvS/zM15Qx54JUNRG6wybAe9fEW6NLdf6zNXt8D3cwV6NnkzEFxp7UB/KCAHwuAEhjwB8Uzobm/mydqHDfsK8J67ojGO4QEf3wcFw15K5IZ2RaYCYpxxjN1dJp6nDxYBNgM/dOqRQG5iXu1SBkSVZl3s7esYDsmShJVqSkFOylwGULYwHnO4nD6XWmbdDTecG2J9S2IEAbIoXja1E/T6H7Qs512AN1BxJR2JSmvxTk18GalLE77AWrp2p7fzAFA1ikrFA/182f5xhwPk8sVqd4/Bn98Fupx0rCqHGxPbwTfWSTu5xmTEoYCsoG9Oo+gdZUjsFC0wobuJIyUq7ISV3pS0Xaqb2CcJLVWCu9R9hgQUQt47ThxeP0ZLS5Mu81kD39+5u9PtdFqm/qRrSAPXzieKBZTPMEPLGy1zVhnQKuBkP4dt+pK7nth2P4MkX4QOYgnR30MPB0Lp8LbUPmy4yny4zn84ynp0Uno4jpQgeM59fzYIF1NtqHH1xwOk344rd9iPl8TFF9KjCA7sBokmmMGXqYrJ5n/b3nVPHycse2Z7y+bty5FcZC51Swrjvevrvjtu64rQnrzn1O0640Ooc5qufYHAbFnTqbO776eqP2Y90xa4bWh6eZ4sLgh3eh9kV0VOAdC6/ar/N5QmtUtwfnhqC2CXO01rWQxegIAzUBjKNxbbNmpOE6ZxGDV4HyjMtpwWme6EHmLB+uDre0NhbI1lL/dY4BJ+8Z0/CwpxnuFPp9rHdjYhIho6BDyqTbsxDb4LGkol2qg3XAp+8iSskj5ZiejUx/tf0qGU4VDM+0I15kNQm5FLREAWzOgvueME8R23Cz56HU4V9rLbyyQ323zhGQ2twEzZAxedb3flY37ylyN7PninveNfpbcJ4CluDhATUwtkyJboJmLO63Ha/XDdfrhn1nblawbNZEBThigHtmUa3CvV/0DgYWU6SXpEA/X9PQDCfI3AQtd1Pchvu+47pl3DMp9QIefFv0mL3Dcwy812qBaZW7NCGJgXCqagZ36e0+9rxjTzs+fDrj6TThfr8gl4YYMfRyZKsZSDVopmDNBS+3Db/8tXd4fb3ja197war73T0l5JJRW0FrReFlmrTmWpASf09KGdYIrA3DUZ+u54WuM5WMTu+YLhARNCDVDYmK689XI2mli2TpqAPMxgCWjd3pQou46cSJKniryd3HedIbE6YYOA1+rNhuO63C1oyX+0aofN1JamqCxVJXGrRKighSzujhoZ0w5K36cSoyo8canymF5Lsjzzd7fa6LFMkMUFfoQ9Vv1EMOUK6+4snzMkFqQ4qBy2GU91h4VRf3MBjOyV6hhea5A1lihXV00J7VUHKaAqaJmK+PvNFowWJ1WV+Ubu7w9HzCvExYLjOmJareSPVdnbjRZFCzu4ah1aaMvoK0FexbRkoF19cV9y3h3cvKfVutMFW0Azq6v+Fe8KBBWibux5YpqPaIyaWlcql/v22MwN52GM+Ax+xJp7UCOEU2+l7u2EMp+cEqhX7ibxyxHIlsrSQVdk2jSOVdFfPteJCHgwL4eXZPO6eU2k7nP+j+CkGiw2CHEWsXQQMH7boTDnrbNyY2aNLrAxnHKvTqpO+/LEquSLngtFPjllPGMgWyntSuySus0ckIgBnF0xmDYinMtRC0Susla3pEi4zpUMZ7fJikzIM+EDzwS6PXXNH8qg6Pes2qcs6RldiaesYRdnMGcBDUGobIG52cgXbQpVsb162JUfNdTlsCqCAVqIZF1eqEL9BGErobc40OHOwFeL2L6GRaD//EeugenSFElR29NA0wDsWuOTu0kpSgiKIFVf++1A1Tu+arP/Pa5VOgxzOkVE5Rt/uG633Dbd1ooaWMydY4KXX39VKLmruSYbnuO7aU9K8lYag3w5TDVKRCbzteuzZSD6pC4rA8Dw7vx3Z8Pr1ZMQZGixQnNTsQB/PoGGO0UDSl9GuSTpe0lEKC0bYThdn2hOt908w+A0yRVlGaczfMprWh61RQZzp5jDquJjKMZsUQrdrzZytTn+si1U1OAY72IlUJFOyG+kMcooc5A+fnBc4apNvOBNsNuKsL9nVPAPhHnwwGVMWEXAODoIJPwdwCGgTT84Lz04LLZcb5acZ8ngELnErF+XnBviaU1Km6fC/LeUacPC4fnAaFfkxKW6KVTGljQvSq3xJp2F5XpL1gvW2433esa8avfPyK233HJy837BqD4HWJacGI9VLprBF80AmDB/hlnuiwvcx4vsyEgUQY3XFb8fLuim3PqKXAxYA4CVoOTM8FU0WtUJTp1YpFNAEXpi9deTN7zxC7fc+4t4Z1Syi14bomhcAMWsqomfqhdU9MuS2Eu2o/yIyyjrTTNAqtpEzH5pQKpAm8NTjFQMeMOeoSWWFUY0duTqkNoVJMCmC4jzPsT47OT6esAWcER+aW6Y2R4KM3F3hj0FKBB533n+YJXsMPkwpui4L33ZhUhDHze+K+oFvWHDF2GBBJL0jQLlV02hawUG+l4p4Krulwbw+OSbHWef1yqIWT533bkXLhfqJVGJmQFjLPBpuiU4aVUMILhcNaCEAqPKybiMKTAIyFsQwUFX0urTYKYgyCAKZWSDJqulshhcjGumfsmaF5uX//xuvcmsXiyMYMhl6O5eFalkorroZ2rIcAartyG+4Uj/q9dvQpoLKYXohbLriuOz5+94p3LzfS3PtEo8gBIdKG2gq2vMEYIHiPj19nCIA3pxPSnFHyhE0dLnLKnFL2jJsmc5caSASyDvOWyaTVe7kL+DtzjkLuThRSNqzu36bAKbgXxm7xxW7gIDMABSIWprmR55ZSwbrteL3e8dVP3uH1vuHty50+lNbiS28uOMcIf9J4GPSwQ9FdHU+eZo/xqTdE11xQ9X5hkdo+0zn/uS5S221H8NMQw3ZLpAY9SYyGtgmZgGEKaKVxetl2iDXI7bA1Af8ItpTgnUEuBd54OKPOCrB62LIDnCcGr8UlIswBYfKw3iK0NmjlNbdD32EMYg9qU9FcLRV5TSi5YL/tSpGuXII6i2mJY0fWp5B9TbjfdtxuO15ebrje+c+knWH3lZscD71oDTB58OOOOEeH6C0uy4QpBGUZTXDOoWpH2MMHLQhDzN5jDoFMQNWXDFZl3/UMWnkd6aj9IaGX3WFddLtTM7Snot0w6AheK9b7itu6I2VCJ93Etnum0WX7gOqkHW7T3avNKa
zG0EY3DvQ9F7TdwmwZdmPSaFB/Nud7YuoxlUtrfPDMoZfiisvAg/T/KQaUOeKDpxOCsbCFzEALYAkRAL0Hc6XgNmUyGKMnEcdbTmfRWRjhwpqMMjp+eHtANT02Q4+cg+0Hdqy5krG3lx410uiFB07/fhAw9P+E1HRjRBOCLUJk2nLQ6BtRiMg7O0garTlUJ8Nwt9PLjU6xzgCihIVHW7Iw+fFzlMYiX0ujpZYWmB6R0iUhhwi4oRgLqxOzt2xWlshn5IPTovCYQ9H7JtWCqsXTOw9rHGphodpWWv2E4DHtZYR69uvZaaxNKCjPuSDlPIqFgVESjEe2jvdqJTnjtm14ud1hjcUnr1fsaWIx1EmIU2oD+vMmQHWCnBt2R9ZizhUlq0OKwqrDw1DPuarv0zidwMc+7H0iXbd0guEkb0AGL+yxy6fUhM/npmLz651fTovUHMKAhgMo4zGjUKrfYKeja13tDvJv7yvvzcphYi/fAkVq3wumLQMREC9HJ4R+kwFOXK9XcMENxbVVT64qOnJnRukZAHvKutSuarnfA/0INUF4CIbgmdfULUYib/LhtmzJEiv+sLAPgYWM+w0y9tKWkPeM7baxSOWKHskNGB3dzWDD5b0gbZki5fuOu36lovsD7yHeIUxaHBT26kyry8z8m8vCwL4pegQfAGOwJYbidTaPg7IaNaU3KHzVqf58jmU4U/M9Umtj0UYhsw8Ha6mcpAih7PrAA071VzknrLrLGxENnc76oN0BMGCdbmIJQKncdhjkenuYz+65otoMs6YhTJ6XiboobWY6IYPfzzDyHRi5IsZYcglEhmVODB6XZYYTA0lFMfwGayytsXId8E5pDR4seEF3VtAww5xpuNtTVem+8VActf/qBUp6wyqP1l/8Prk01FYVqmzD3qnh/QLXO+6uG6M/46HRE9Dp3yvDtU+GDN1TF2UcLC6rS31YOxh93fIqDB8+g1CcstqKFv7u5t+lHeo08eAaU01DNXZAwM45el0ag8syKfPUECmRBlfcuCb9PqyVhJF9TypJyFhS0WvSnTIU/tNTpTdJpVZoyC56QGF0DslaFGMZX1EJKd73Hd55vN43SOUawunzMqaifl/rNFKVmbunPJw9gsKZk3d0y7cGFYb7PPTnC7BiwQGm/yRaZ/Vf+KzaAfsZd6QXs1jxfmjaKO2pYN8ztj0Pws+aGGiYS2X0iTHKDjb8+bqNlz2e9yqC1BruKWHNBfeUAQOkkj7TOf+5LlK/8kufIm2Cp6cF0xwgcwQ6HVL1H07Fa0MvMQdMlwnTyn1QL1RryqiNhyIdfAV3DQlz+gEZGIWDDOAMgqq348RdlPNOmWLqcKBCkcJ5WO9NAaqgJLV9SRn3d3ekLWG9booNywi3q0WGoWtfmJIZV4c+quu/sj7Y0Ro4MEHzNBPyCmqEGoJTpwY+3DRZZTGkC4CBh0WwFucYIb5hCUFV7xGnKWLEmxve2MHwEJNUULeEbICsuLjzDk0hlX3dsd023K8rXl/veLne8fHbK6ruRKjp4TPVoThvHGYPeOOwRI9z8PTXA4Ylz0GAMCPsrxdlANgrxcSlNeyG08/8umJNGZfnE5xzmJeI89Mydk/OW90RYiyFe7w4ZQ4Gpln0JN0QPC7nmU7Pun/r+jCMLGEMbRfd0t14qJs05Faw14QqPFAnbxADqeRKfmQx1kJVCrOycunEClHtVJ82OXFywybskkVgGqet7ocYPXcaHz6d8XSe8XQ543xZMJ9nhIlMQOsMLkuE1Ir7KWDdACsNtQBiGIfiDCfiKEAzBuJVBzhPeLosWBay1frushQ+Z7s1SNJ3UWTT9aTc2oggdNdx5mZZpOI5eVqL8zQBEyfoPTNHadcp8mVP2HLGfSe8Cd0L3e47Pnl7w/LVd7htCXutJD7NpG735IR59jifJpxPE/KecI0eprK4BOvQmkM0vMLB77inhF7YUqq4W2a95URa96yTsQdjLYKx8GDuFppo/hk1hdU2FFcRLCdYCWRQiqUPYREaV0Pv+dNMBvM0OQq/rYHxjClxPW2gFyv0tQgA2/3+GIDYPTkfi12fctfEgvW67ViCR3OOfpLaDHqnTGidpASAM0IDWghSLbhpynoqR9r6r/X6XBepl5c7nD3iuZ3pGL4M7Lw1QQvqRwUA1oyJKmin2LvmnjZbalWrG4VLWoMoXdLqTdID2gZE1EXE/QZQiKGqJUxfhHZoKTY3Uny7T1kbUwgjD1oViE2o1aP4qjdwjwUxnc2J3ix1NX63FIrOYtJJaeglFAIjbdkNFmSfQvpCvC+kYaixmAJZYd0vb3Rm2sUZwQH5ZSbhAoR26IBeUFJGSZk5Q0VD8HI32KwsUsYovZ2QRXC0Y5m8YA4Bp4lF1yt0OFT++urL776jacKivmlezy7UdeUmiFNg5PV65kRVG0L/O/iX6c8pD531Q4et3X//LJwWx+g9inDf3aqMKA9OImQPDjsnveZbzlhTwn3fAUOqLzql1xzO203Uqw0YSax7PiLiB8HAAgYklkxK2ybTiruxpKxCZllxhzFpcnTo8Lk7aPkASGvOBacpcL1RGzJUzC1CSMpZFihrR5GalFwUg4Z+KoRm9f4FCNemXqRK/3k0FqMewZNZGiCW8Kl3qCJ6jwCnKXLi99xj5trQdJIv7Zgga+MO87ruePtyYxqtczjnilIndZdgLE7wHssU8HSaUVPG/TQTCq/Uh0ljRlNtNJutStoyhoLZo+Fiw+KNgRE7bM66Xs4YUSNYc5jjKnJALZTVgEF9/8qIzLrfLP6Ijelsui4JoLPNIVkwgyV0fFnHHVOnu3vv1L/QjVBMoCcGk1hlhAhWRyucYQPWDX37948WKFYtp0QQu2cjvgV0Ur/0y58i7TwIni4FRvcRgJpfAtw5RI82MWoClnuhaYmYTxS7hcAFvPS458ECKsixcCqwTi1yLIxOZTH6ERPy9bYg1MtwZH65rkgaWRECP/zzEhkNUptOVUot7YWqn7t7YSfknSbBKqRlDgFn/+pQwil4nKLHEjVmYwo4qY9f1OlpQKN64PWgvVK6ZQ6hFGsMFiUeLPPEpNn+Y0JGZ2bBxXpNhG7K5iGBHWfeM9KWsd9WpDvjw1smVb5qmmjKhbsZYwDvB5SyBGZGdY3PaYq4TBGzD9pYdCo531P3ARRg+P11rddeCvbKIjVtCQBhn+enE4wxOJ1nNgYeD4A+xo5q7ODGtcMolBBlWXkq/i2AAjPYicGz0LcmmMGCNgevO4yCt7cbXtc7Pnl9Uad3j8ZFDYx5jPIgQtCaDKLIdSM8uqUyvOsm52A8XdRPyzSgw1wrUmq43nc2Q9LgPIvTHKjROQxVOVF2RuX5NNEtZNsZWw+GBtZCIXVQmYANnlCSPxy3T+eZz5mzoK0Wxn3U3VHue8brlh6mKE57RSUfuTQUA2RncYu0PUq1YVYYOnoSJ6pOiqlUWN1bNXYZY+K8rQm5CrIIlpcJ1zUxtfr5BO/p1tBdKN48n/EdX3qDJXq4Rni+pgLbWKRKzgjO4p4yjAvqhA/aG1k/JuBUaJcF7zAHFpUJn
gxJEcC64cbe4XFoUwLpLhBAEmCrlQhBZ/jlQzdYRShVUHq5cY5nlv26+xfHfc70cXr1zfOE0zKRrZoL7sEfu8HG6/q67ki+InqeM5MWNrE6VXU7tynA1AhfKj7MGXFlY9iaYN2/BYrUx5+8ohULb4CyJ7jWBoliLD6tGYm1kMhuIzhMS8T5MuNyWZBzxWmZYBMFgtbR+HWvlTeeLnq9p01+dyf3vXvp44y+KJKsuL6uuF03fOUrn9LAdEuYIzvKjz44YVLtUS3s9qVSiFs0C6g2Yrm9G5onJtIGvdmm4HFZJrqFl6IFTLSj5B5srw0tF7hSAWsRgINu23RH0RilvqeCV90TXTe6OgfXvdi624NizXJ4lYkITGPkgXGFsFKpSlwhTbtokmvNBSgVzlCnNWmqK90E2JGdAn3LHn3Beocf1aew6nvOOvluKY8JAejkBsXDuyC2kvDiGg/ebUsIzuF+3RCDQ9oIGztvB7Gl2231HSe+7hkf1bH/Z/2ZrSfk4hVWm6ODgF5xUDZWcBbrXlETbXXWPXEnGKhXS6XR3bxUhNJgHAW2pHxXXO87qcKvK1LOgzlojMUyTXT2Dg6n0wRrLSoM6rpzgk2ZkSLCHebYkTSh67g6hEAZZFaLL5rgcl7gLD+H9b5zd9sagmeczLRMsMEBijDAOZ1mBHknDNmKFp3SzXEL1pSx5jxcwTsu8nhQNwDQRXzfbyyqwZu8Hc1CRYUTEOL3HjFGZAGMbdgTo1K2nJClYl6jQrFkpH7wwZnw/nlS6ySLL3/nF/Hm6YzLNGF9vWNfE/KaxrV03iOmjGqcmgIIpuBJ1lAjWdoN6d5QdHfqHc6O+3HnaAbtbDe7Vk8/baCbdFq8qLFtQ64sUt5ZFnYIpyb9zIxTaLrrwMxhrfV4LxvDiSyoIcHlPOPN0wIjwga7Eq4EeKvc94xUGrzl+0iq5+orAG8DYcMpYjKAb4Iv1IZ5neF8QC4V0/rZys/nukjdbiuCDbjOEVaAc9TlfvCDhi4WwwQ2KOzn1QZlmiOWZWLxmOIQSjK11gw2ljUFzjk0EN7zRh9cPUCB4/MWUbZZbtjWhOt1w6efXnG/77jddywThXZe4QlMQVNEtVOpNGTde3hdKgN2KgstUsxEKrl3jhY3tSFPcZAMoppiNgHp2wbwmYvOqWmy7TH0oWlR3XPBfUu47wn3lMdN9/7pfFjCDNaVIatLjCGzzRLatLYNej2NZtVQthHqYWyHG0WqR5qfNSbb6b7HatKw1V0OdA+156Ix2Y+amj5VEQrpYZXUwzTAsnjbQlV9Cgy8S3tE0alAmgMFUvojj0mq/++vr1UHkQOm61IE4tQyqvH9i2C4xPOKGmw7C35ntJXa4C3tkTpTr8eq2yowymzbc8VtS9i2hKvaXNGyB4CxiMEw3HAKOJ8mFuXSsO7pmEC7tqmzwfTe7Z8ZJ61jH0eSiFr6NKCWnpxs0WrVSZ16RK9FqoG5VxXQn0mZmXqfM4yvDn1Q/5LWFL7mAdqhbb3cqoljbljPGxvUfDEwpgHGHvs/ZRkeeqSKUpmzlUtBjBQ6L3Mc6bYA6NkXHD788AnLFBFgcA0B623F6layba1TPZrDVkQZbAqdWYuvT43uhaoXjaiapqjO5/3saTqRER4FBctQGHvopVikmsoBYKBwmz18TJVswYbrAUV57yYWGDQ4XQks6r5fc8VlSdhzQbIk43QzZYa4Nl2XAHvuadUGRchuFcf7xhkmqRtrx/lGccI3f32ui9T1usLC46Qmn2dvURQDhx7EDewoXPA0eJ0CgjpCCIAPP3yCMQ73tao+JwPqA3fdM9ZS4dYNl22jyWM94dkanJwdAlZgEHUgVZATw/7evb3h449f8H/+n1/D7b7jdtt0t+Ox3ze8OS/46GnBZEkframQGqsmnmui2r1/lM+XGac54otvzhyvrcPzMmHxFid/RGf00L/XbUO5AzAGly3hskwopeHpNClTyA6s/PW+4bpu+OW3XCTft4TTPOMktOqpavuS1c06q4altDYEoz54LCKYIZhLg3N6CI4pUXd9uUEa9UiXaYJEPiAXnQAvc9Qi1Y2rCOlmFXnetzRcn1Plz7r2pNNah0g3155DVMcBF7yediIkHewW623DFDzW64ppYU6WDfYhjbUflg8nZd8HDuydHaQxGNZcAoGrFoDD3IL61vEw7Uy2bWejE6xHcBGTn7kjbA7XtcDahGlaYbzHyXAfupeK257x8esd9/uG6/WO/qac7hDiFHE6RVwW7YitwZqPmPl9S6iWk1SPq7HabAzhuCesZZoSgUrVWA0HFwR+FtjcYCRzb+IdEDzcFOAjj5asxW7dM3Jp2JLaMEkDMsWjJXPfyvBKlZEYMuGsASZDmDT5zlBU93l0+O6QH/CeaLglujmspSC1Y6fIHVFFznQxpwuFhntOhOEp1CWtPWhUxzRPqLng6emM6yevuL/e8e6rLyQ83Ta44DFtGblZ7KXCV5WRmMfMpe5XyGmHzi0OT8FrGnZQR3MgtYwsDfdSdKpq3GEaskuhe0CnxdlbgxhpJDufJ0zniLBoFPwDG/e9CvXYWVslfwROUlJO+OJHSXfADuuese4F724rtlRwXfs5SauzWiq86abFAbsBQik4WYN5jgjBM3aoCXzMoOXot4AtUtWl6tAw7EUdUPgwd/qjVduYuBAPnbWL8cHpNMWcGXr1WaSUuBTWwx7KPFoKI65dcLDBDbV670ShI7ro+8opq3AwYd92bCrWLdnjdb4jAFisYSS4wbCEKYUU1G1nemsnBkRPCKxkikUVJaAuxXZMCmgghr+lgr1ULaB8n9FTBCkxwFkzohc63HRXJ++UCU1CxYodH29N/elqRcrs9EujSWUQkJBS+21lcFyaQ5DZGYoAiRHWAN4Cl5nO3+c5KvvtCBlMys5rAiQltGQ5PqPuGFKrHmQi7xMJcJidMqvxcLLPqdORE9Ke4aJHrAEWD3Ra89CNAiTPPHwNj8Fe2CyApqeswrWcDI4iBZDC3tOGU66UAui120uHwQpSroihIcjjxK0wjEal9Mm378YIc5E001lh85Qw7x5z9DT8BTAHPxifrhMlhPKIkczbLIk+5UjTFWBQ2rtIs/Rf0w++NTIQay40bM1Z/R05FRJSlOGPydBBix7S6IzaLgnga0XWpigoHPw4DIwppTLYNKuHYKerD0f3vu9q9NqzFqi1QFrl/W46MUn3PM7BWk5z0xxR1LlmX3Ye0LliKhW18VrCmEENNwrFWSjhRbopLQlHnUBhlaiCprtHaUitYS3qRt8aorOKPni9p3l/WWdZSKPHPPHzjiN5t5MYcMD8Dy/B+6+e5hCCxzJHoAlSruqlqXBsE1iTFSHWe9GYsccHgGwAXyp2AJcmmKeD9NGjRap8/Xf/378+10WKI6+MrKh9S0MJ3pk8uclY5PmZGo1ONQ4x4HRe0Brw5jkNJ4MrgD11sSmthtwdmGNAqhWGG358UCpV4b0ItB5NwU407Qn7upN6rV876LUWrYErFVEE7jQzNE6hh1Q4LVw3BuO1caAzPXc/RTgIrNAr
rGPpTZk7pQm2XPF630mtrxVbSkg50/0dBkuuHMsrYbPruuG6rbiujPFIueBpmWBw+N1B4QYWtop15+4sVRb9WCv85BFa0CRbfk4ih0KeSbuFUFITBrR5g1P0uCwsUqd5GoVhxKEQo0FtDZtCk3t9YIL1A0i6sFcGiYBR1m4wHo21pNw20n3XbYcPDrfrivkywXqL6RThoUSZfmC5w3KHIvHO8DxYnq21kSk2MEKFWSwAWJJeRPTQCx4lRpznGa0Be+JerTbBlgqcS1jWHfs58xBq04DrqCmiLsdby1ww1TIF3w8tQtrOGfjskFJCyRPSmtSKyeBpmejfqOJd2/eVtaKmDBSLZsDdou6L+kFfIcgA9z0CeOHE28/Czugse0EuPehQRdcKW0EoP4jOosXIOHRgyD8mZd/mxmTjMp4HDdnUm2yQJkoZ4aS5ynEwtja8/Yo6l3N6A6RmoFGr5hSK7g773c/PGAOZBfU8AwDSLcEZC6ky3tNpCxov0+E3g2gNDOmQakEl2BKLYQgMHvQqe+gWY3upuOeM1z3pe22YHf0g7QQ9q7RIeYtp4n76cp6xnGfMGtMx3n8n/RgMpl5/iYz/N2yVZA44n2cygI3FNCXELeG+Z5KqbOJuUTiZVghyIkKRakXZdhjv4O8bPkwF59OM80QotVYStEr9FihS0ZGKXCunhk+v63vEiaaLR+dJObfRoVbB6bIgBjcs7JdlwpvnhXEa3sGahvsKrLuGmpUyWFgwgnmmAW2qDVV6tAcAZft0M0YnfNC8YWCdfRCilj0jx4S0JRTvYNWQdUwbvdi2vhcwLBLa+d9rw24N1nUfFFcoC2xN1IpsKWPd9rHv6IdSq5XRCc6oIalogrC6HgQHB8EpOJw89ShBsXVIG/530n9saSM6vofM+eCH7RNTV4nAWu2OVd6M4Axp8kp/9t6RqIGjS29jLcZpgIakZOxRtFoPlxEMbSnAP0IWlXeEVKLX68SHNZWC622jiW308KcJYgzmy6w/D4MpH12bCfORsuuChS8OEj1qKIQRcz3U+7p/yI33ienXSMkdUwxoteFpniFNcN+S7vrq8G6jx1y3JOL043WfV4JD1RTa4ByWQPr0OXqcgrLIDC9Kz+tCqUqgYVdONmjQRT91PGgCyRV1TSMbrRuToqr9VRNlePJ69A+5Ty2oFVIZImmlwaMhgtNWFUGrKs0oRCusJVoAcOLlfWewKCGitF54SBhw1ipV/2FH2vc1WkTphGC1iGuzJRUYhZAH/3kKuMwRT0vEMnH3aw3DNHs4Z6uNhru6y7NKsHGeTUHx9MK0umvqWsLZOz67reK2b8i1oinlPCgZK6ilkbSGIiQ8pdIOL7/SYJvh3jCKGhjbIaVZzgueTxNdZKaAoGLsA6bux1R/OKTXJQy3f/3fxqhDj1qa1UqbNgGwTAG1VLq4gESvaHt2Gf1Hc2q4lgoxgFUz5VYawrNKMoCRe/ZZXp/rIhUszVJb44N8twYmsbvqmSdVuMAMtWFdE7x3yHtWge77hQpClCaVhCYNXv2vmnaHIsJcGaVMV8XAueN4+LCbZshAHRv0Q3SGlGSjD3ArZN612hjd7KzeKzKW/wYY8HHvGGvRhx+Cay9SrR2Ej9IGDFT0EAcAZyxufoc3Bkl1IAIepn1BHvXm8wBm7xUeJLRileVoDa1vOk3WYKBaQ9zqlPlo3MEK7Ph8V/6L+vs521l8x4K5T1+dUCLAMFjtLgSDMFGrLsnN0VkbjPdIvYtTtpvXxXnj9FU5sdgtId5pkxOngKwQh0R5j8HXH3ZjCPlxKU8DYustbD2w/w6HVV0Ra4mjlsQIDFRXFVSH5guic9ishVHSgrz31RsWjTSxZhSnfmj1r0llCY96sqq6PCkFVunM9BjUyBlzaN7QGqSAbMxqYTQBAP1y6L1udDnXl/WPydT9/UNZhBaCYHlRBCB82yczbfacQqY93C867lsBoDUH71iktlwV6rbjPR1OJA/XagyyFg9nNSy65RevX6ffzyGoKTB/vzRBlYqyE3Z7FNRT3I8B8/awziYOzcvYq86e6cy1Ans2hMz1efHeqhbJ6XUjhbyLl6uSJx4NkQEMdujkSRRboh96uB4fNBxhHu5bAA9n1QHF4+Ez7VMj7+ujAMfQ1B2fqIRpFkUY1RIUnm9CM+GsPn2mNmx7QgwOtfCM7eiG+1YoUh9eJoQQkWvDbc94e9/YKUi/KSmeDMFjngqWOcA0wfVpIXvIGnilO1tjkM4TUsqIs8Pr9Y4tJ915FdRCj7mRRySKb4wuXx+OJuODmDwjIj5YJvjaYIsm2wL0bBPSfWuqKGLgBOPmj96iBo/LHEcRWIJHsNxJ7Zmw4NvbnZ2WUIvjrFWxYg/t41vjtJkg0rDvOwuJtdzLxYDn04xTjJg/eGbXWCrenBYsmhAbHVmNosvgOfJmdtYilgrjDKY54rRMWE7UoQWNrg7afcUYUFLFFDzQgFKNihuh7CDVfmz7mEC6e7NF15KwAenTXy9S3vFgif7oZCF8mGNwuEwRS/SYg0dtwoiWQVvnTqtAMJ0nwFp8cH8aBrnWdcNfvPfg03RWFf1N4CJ1OjZZmELqfp8GAdKAveE9Z0DnhxYdIDTBLbXiNEXsWZfr1qq4E2Onwawj3mN0EAmosSJag8lZXCav953H5Cw8BGnbkUrFu7dXvL684vZyQ9oSGZYxIlsgA6juEH1mQxdzW7j0NvMEWKfEkQPL9dYCHgiRnfc0R/jAvVbOh6eiNUC0tOpKltd7XasauG56IDc2E8bh5B2WSPuuRXOuABmmsy9bHiy5JqRlNycwWnyMHL6CtO5h4WtOPS3FwxkQIlsmfPHpgo8uZ7y5nBjR7r26YhRIbViv23B46d6UKWUyRFUmsOtzaAwnwjl4ROdwCl71RUBKtFWDAWIkSSFMajTdZEDpKddBsgDoiefU3ixY/gxTYKx8mPjPSQuUctfZDNgHVuQ3vESlB3iwaQK6u2MXoHt1JamBEydqQ5ojiqft26zEmyk47No4Ojw27Q2mEl2yCn+Ktyj+W0AntUSmauZCfH5X9pIIjjBCa/SGBUPcCk1au21Nt7WflgAXLEL2zKKxwJvrgpQzWqu8+Y3BEjymGOjaoNDiwd45PL5an6YMb9YaA8pU4W1B0/cHnaxoKApYkRGq5nSJvEyBsQbGDIgOuiDOD9RdTnTUUvDVYQ7HkDIAEO6gNsmKuVvMCIO8ELyDD8TYW22YI2EP18kAD5OTsxbeHZ2rsYQEnTOHbZJOR1btkbqB5+jyaodk+XMXIXy0Zi7IWXwsnFLQcz1ycPqrwxmuw11TOAgAAtVYMTNr8p7ptaXBZXWvFmEarpJvssaJ9L3J139BZEyeIr2LPrKIzNf97PR6BEzA8OHznu9NKtSWit0qv8g0y60N1lZwZkw53Ht2mn1fzithQjU2bhRpfo5ZgFw0miKx4Wo5Q4xBgUEJHsWxKaKfG4klzQBFm4QQozZi5oiLqG28l37XMaWYv77tSfdRlDM4Y3XHwamSbNGCNefB0hN9VluT8cy5PgWJoDqeqd6yCWn
jRsBwQvFa3J0FTDkKV0dPgnN6DxssMbIRi0GlGweU3ZowmbizHVPBvu7axDWkvQwh+pYoRr4npggbkDUp9mi0gsKT3loUpdULgCw0Vq65YssZW86Hrq8pcUkb7uh4j3h1On90iHB9RyedpKRaM534oX+PGIEIjQGMIfO2aXPfm+zj2eooifoHBocWPM5TQHGWRd/TwSZ4B1eJopxqRVEj7k74eEQGjqntm78+10XqvEwAPG5boqBzz2TyiLLYGrsYC1qYmNYAXQaXFFBy1RRcizD5QX4wDgiTw7ZvZBnpMtoIMIWAy2nGaZloLNuxZOiBp44RRScZZy1OMcJUgamCVc05+WKR2nKFZzuoCcGNexTDQ7U7iUd32Oh0VX1WcSdFjjzkiTbR0qU2ijs7jXXsAMBdTWd3ecPd0CUeOTHeKnSgk2aXAZKNZyHuMLE0HeZ70GgwyJEssxKbug/QbdqmwjWbHmoNgpYJAb3ux4QTvOZwzXEw/fqrO0CwoFtMkcvjWfcr/SAKuo+KWjB2y8V6ZzrVSrZVUgJO7roq3QdS1wO0kS3F7y8CZe6p47t1sLaRBu4bXGXhcdo5eke9jje8vi0LC3jlFBJzwDxFxJRQOvzqnXbI3GNBuncj3dkFR5E01g6IyYIHTi4VeyU7Mu+ZhSqzSEGoF0rOwgMozgPO6N7iIJ0ABmHucHLf0TDbrCpZCM7o/VVRVG7wcl31cM9YIpuHi9McKCNq6VRwU51XVbIGoqBMESJ+TEJ9Oq5CI1jnLBroNt8rJH8P7aSCMyjNwIKfIV3QLaRZZqc5hyYe53nGeZ5wmiYEH7iH0vusF6NWCoq6pmy3fUB+WQlG9y3hVYNFr+vGKVcnt+DYXHRz4v6+Mh9/VABbKbCmIm+F8g9NB0g6Ufcgwcm7wcScVA8adCflNWK+N7FofN/MqtMCpc3mI8QnVdOE+z+lE8AOWLmzYr21WIKHnQXtNHGirE3vcTZae62IraEaup83IQIUrYUooa2LvzvE+81en+sitUSHKlZznjrif3Ra0VucJvq9nWemjk5K82yFo3uIJCz42Y/dCSwQZg+BYFkmfPjBiuvLXQ9uizfPF1wuJ5zOs5rUGg3dK3h5XbHeNrx8esPr64rtto1CEUNEEzuwZjGGRp9qStlpmU0nAGeYphp6Z6jEiJQLu+5aycbqD6GlfVNwFs21sWcovo1gu9bKIZaUBoeGaIDZWSze4TSp+r7DlrpfaDhAniPNFloQjymyZLq051yGv2F3dB8O9MFDLB0tkhZMU8jELK3hVeGpVAgNhr4Xs0exbtHjVOPA6i+nCcsU8aUPLlimqOa5x56Ai3ODtGfYPWMrFVP2qNLQjO4tPam8PSLDqUkt0Vxt263gYQs9XsYSzrOeJsNBevBjt7KS4+9rPBQyBK14SKWnY+yu+j7AFwGkjQaBZBP1Z9Rz2QJ6OBAGDioKfRSDtkwqc7caMsJDr+nPZFuDqZUkh1IhTXeUOqP06JVSG6zleyKzM9PxQidgdB9IZ7HlhJQLPn33MqJnPjjNOE0RBk+jy2+NKcSlcBrJpcIIx+stRsy6+1jUL9Jao1MQEQ1vKXyeAyeM6GlY663BObLgVOFhmWrVXaXmgolT4kuE9YGxF1rUcy5w2aHVoBN0P9S5QysamJhTUalIwm3b8bJueHu9AegUeQAy4RLD8MpcAvVX0kjtv68rrvc7RBryXtVl5HBTqa0NS6tlCjjFgNOiybsxIM40yl40zy4ukc+Ye1+DZJ0SgB5/XXR9oTKAVoDWqkLbep1g4IMOPk2wzJFnkZBZLY22WjTkdTgLd7DLZVFbuIxuCHBdE6qyiWttWLdvAYNZZy2MHBAHg8EAQEZaZ6fjsiPveqIHxXZtaE6Xhbrwn5YIY4GnpxNEoL5rxKitMbicT1hOjH3v+TNNGXbrmnC77bheN9zuO9KWUZSm3kkFXJDqIdCJEEIRYidNTH2fo8vj4EibrpXvvS+7vaMQuOmOwlsaVorYsZj0taJUq0tqwEhF78O7c3WwXLgHT5+zZrpXGKMqOmRqH4gD7+mC0DkjcizDa1MHhL6/4QFuvSMLzAA9DVnQE12b2hh17zWloreGnlXjvUMEMMc4qO6X84LTHPF0OWGZuQTvkdmcGM2IVxAA0+4Rg0NpDhWiBeqII+lpwV0AOn7AnpWh14B7Zv7CgPy0OLOYu/F7CVtZWhuZilbsMHJ970unIcIxD2tP9B3LwRJ0lrZDPeepF6miLLEKTtx1wGlHgeuwmxXaWvXICP6s/FE7dN2p4iKWUKweQGsmJVz0+8IarNuGLSV8+u46ojf6juLNeenhraP498Ti2ipKMaj2/TypDvB2kg1gB8LgjDkiWVTGIYYTaAMQa2XabzsWM8bY4560DujTkxwaNOlavr52Hv8PDxKA47pmdaPfdEKt1mDPEbvXWJOHIisQ5Mpnv2TaM5WmHp7NojYzjJH7yz4QvbrsgcxCf0QQTT1exb1PlNCbprv4H9gsyKQVoDlL0bapA+ejpRUo3nYNzlvESGKVaXFMXFaLlHUOYgARAxM8i721SCpd2NRFZ1ca+pbyr3m+99fnukgxmZUmqq0d+5wmjSyd4N7DbBmdbYEGtCLMZQrEkP3kYH0g9jsHHqbWYTkvyIn5TT3aPU4BPjicnxY4tVraNmbT/MqvvMXL2zu+9tV32K4raipwjbBJ0AOWNbHyJm380HJl3oqSa/CBYYpwDA6zQj6mCYPmICrg1IdFWPKC9xoCR5fqw4G96k1fUWvBfbUsBKVg9h6LUmfnGOFDVKuZir0QInSmIjar3nk9b4mkCysClO5qDBjwsMl7Ht1YrQ3NAH4O8LUinCfYdQdyQUUeDUOq3Ui0DhiJX2Y4WsyaddREcK4nEhecw/PzgtMS8YU3F0wToY+eWmqUnIEm2G4bpm3nns4C0+ax5II4BVyez/jwgzM++OCMZYkIkTqTMThp0QCOwgxdlEOLk9HdWxdoIrQBmXR4kq73DDhEozcbFfnqEKCdc4OMSXYcCNpYRGdIN+f/QFDiRC8GWy7Ya8OmNObWGiRrzpUe7FYEwdgHYayy9Tpjz/QdPDOOrHUQS4LLPRW83ne8bgmpVDTTP6uG67qySL28DOaetIqcF3x4XjCHgMmTEFGCx+QtSsFgQA7Sg0BZsuYQYlsWf2toA0YXdDsm7J4BdpaA4AnD3xN31VejidKq/XHG0CbLWJQmhCkfdspAbzwcQqShcYgepVTY+v6k0jdZHbHIAO4pwVua4DprMVmH0zxhagEuJ9xLwjUn3K+vnC4EcC7Cuklp4ExC6E4nvL88i4JXiUf0cDHATRF+mRCWCO8ZvNFafYDenf7595kUprHzdEKtozQyjVulvZRYGdFERp/9VhpOp2kACc77ATc3vf/mvWDbeJ5tecV9L3j7slIHqkbIe/oWmKQMzOEm3ah5yWqT07v0NefRRfgYINYgtAbXBYmVN1zrsefaGRvDPJk4BfUsO3ZWvtON1SG6NQqK9y3jfttxva24Xlfstx2tVASjO60eMWFYYFEralPjzUpGT2
pZjJ8rAyQYntIOy3pNaD2BLRF+OyigPiKhObJQO/ZUrxeQBAJwlESs63VfXSdSZ90OjSFmMIB8ZfgloRXUUZv6AE9/FiTL9vojqMHLebK326YkGnBCZDPbXnC418bbwwzE3w51Lqdw/BN4sA6e2QmHicFQupWieTkd2DtYQ3w3WogO4lDq7x0mW0ANvXTyeny9Y1oCn5xWXpwXrdaGxZVDFfmtqrnng9Xbgy9c79tvBpa9m+ZSUALXwN2b09+MwUnbYYNnoyO2Ce+hglKId6ME8mJFFE2CzkiuGn1j0TiE73VuZkziSSoXRfdKINKh2uIq3Ca0ZMeidYXltiFAFCNHOjJ2WCvKWcHvZCC1YA68w6NP7J4ilpqyL4Pa249NXryowNXh6vtA38BrnFFgLXQ5CqAjBIVVODmMvMmzimgi/IKpDERRgfo1diwCAsqsMFMpsDI08EplUQ3BcW0XvDqlk5BpOyj9IB+6lEZa+HTjuO/a3A2lPyJt+7fzMj1K1EFYGRGZ278EYhIdmQvXP6JCzU+6dOi3BdLlntIQ6U+BRnEvxrTUPsQ21I+8FaPysemE377qnPZVT5pY1cwpvvcM4A98dnp+v8Eq+uL2xSNVEc17TAdSKWjtqzzz8BOi6l/XeYV0XXK8RT++fcH1esT5f4COn20GBb12DPVuDBH8KuJUck1OGmApJhCYBUPhuBCY4uOhZXAAV0dI+qpWCek/AnmBSRqgVpTfqkQbKoE1Wqw11XH+rHkKKQrZOUewgLKxaOGigzAmWMgePL55W7leHZ6ZlZpi3JEUZqMC9NZTSUSoQXECPDXLNiMYgGosoRn0coYQm0tiPWnDPGUZXFiKCpTgczZNBqAjI2miue9EGyOh1RSVMD50wu8pr+sGCM3wNRZje67zVM6Z/RkoZtP7BYP2dXt/oIjU0MY/Eg0nfHv+ONtZWR+vRLNVGD6otZdQ6GCw8tBbv4DRvhaQHmXqXYaQ/GgQBYaPxoQ9RJAVr7O6jp0FoDB5LdDOyOsQRaWEndt/awMIZ+nffE7btwL4dqClNVpO1jMJwg5ZqHn3g+jwRRbs+qwymuUsXsvJa1ThxveGykk1K1ZRTOUkGXgN5jP49AQtj0QTeUiuKYFrznBDiqXTvyvSz0gFzHpBjgVxywbEniDMw0QEmzkYkLgGtdbx/fyUlXJ0QpAPLNRJWjX5+9k4/rVC5X/GlqsP1WahETnnCyPIRnRadwq2iRWAYqA56Mpvt07S1qK6stIZShZojhVceb8quxJcBIZWjkN2XH5yi6zDl5E6vgju4Aih5AZMJOpxBCNc0oAlEXQnGwdbnZ3EuxUcnP0ySRzHG+PdFu2R0JIVx0U/zWlPHHuLczc37TgtN19GcLh4FxRhII9usJqCrrVcDUDqhUyMUcdOiiN58IzXXqpdmr4QnTbXz+xuvu5EykqGVGfnQYI17ruvDIWqhNO7l3htaKZzUjoyeCyQXuN7hAUQrEwob5wL/O+49x7h23imYzZ4f4ajVIXs//96i0N6qmV/OWUVhBvlmtJcPh32j8Nsah+A8JEYEAB6D/PD5ND1YmKV1SK9oTXCUcyrs6mQulj6ccULX8xCBmEZ4fOxie5/MTaAi67RZEl1vmjYP47bobWTKDU3r74ciRQOxqY9w0cOnSidkK3MBuXiHa3DYDwv0NnHT2htKPWAN48nLhd3BGjwk0GIkegsBF6S9NRzeIRuq5E+2HkdyQmm6SyqVi3TQtiV4JsEua8B6iVivC5bLgniJ8KrVaJ27svs94ePLDV9+vOG7H99we7tjv+8o6WC8tBG8e7oiuMAYiPF9I62dzCxGoh0ND+/h2mwGM8fQnRtamPZU8PK2435PuN0P7KmiFA1ZC0Awp9u7tWbevNCiXAt962rX6aS36X/GpTQhlFYKF7JisTqP6LifqKVh3zLSl2/wW0LYEi7PK3xwWFe6E1yfV/yQM8ip4NtfPKnOqWOJHlaZYUNYSI1YR8wR8Bbu5rApU/HYM1JrkKYR843waS/0d2xmw1orUgeWQFjFoM+iWDTu4rYl3I7M1NxMcg1PEpYHI6cF0ThsyOaj5ibtCXknlbwmmgHbzvTaaA1asyhT09SAWmCMQzAWayBsFjzddlNrOHayz0Jxen/yIMgKNw1bpqBMvugcnmLEU4xMlRWZwk7JQEmCZA3fXy4IkdO3bU2nH4FUwr61jkiGdjoTGD6n1htAnFo40T7nkIwqHWnveEsZr3vCljKsZbq0DQ4uOsLESogxju4mtrv5/I+NqbMGaGwC9tuOvGe8ffmGfCSkLQFggV+eL4AxcEtkI6mRIa0U1FqQbhvynrB/uqHuGZIyrmjw3sB7B40vwLqQlj+iVzqgTgp6+Cq+6wyTunuPaO2KSwy4eE8BsjX48LRiDR7P6zKd4pv2NbVpE6gNc+uMWyk6icQQSJqxFqYUriAeJuFWm5oKqA0UMPfmeyrckT24nBjLou+cxX5keGeRS9T0787Jt7KJGxMrKgun2ZKmOxjkCw2ojT3xm6G1TAfZ1/tRvtY5/40uUoBOBNZAWp97DKt6jhG7HJzlotYZ2KoTS1fBIJSRpZz9kVY5Qgihk8c1BgBAyhGpNNQG0nSVdTUWtdKBBmFcROfE1QClhZszcM+fhImx86jtNPfc9oRdI7WPI2E/Eko6YAFIYAaN8xbL4hECWXohOKUJn4fiUKeP32NeL93dmEa4ZExw255x3xNu24Ej0TYInZOoHd6GWqx47bhktXg0HiVLqTV2nc4IWqO3WDeCJgbeWHglVtCKRnAUCpRLbTApw+4Z14MH47vnlQLZoDvDSIp5U2ZR8J76JBVNnjT9jpArqu4M3m47D7N+apS6EmOaToVAB3ZdIoPvyxmZ0LF0oKhLxf3IdFgoFdCUVmNYYKKGX3qNjxgQ8uhAuxbYXscpofHqziBWy0RavTaD7BGt6l90oe4NiS8k9WQctQIiWDTj7NLpnlLqIBBpB6u7kaHtgsK3WTUsomORN9qQ9KZyK4pYPQhteWcZvqlFKqt+b6RjH5qjdLvtqLXCdTD80Ah1gY1WVrmRlLSlPJus+x4RF68N3wkNzHtY77kxGYoyWVEbsFm0nrHvB47bju31zvtYYXkYgY0Ofg386weYpemUW/aEljIkV0Qxmp4daKWk54NRsg54uaYfpjGC3gQitKEaSQbvLgticFg8mX3WGDyvkcJvz/dWQUs1dCC3htipcQv6OQ0WnaisZZB6IAIp9Zyh+kkIs/o7GnVFGRP4eF4H+DIo80MzWQonJNHvN5zdh1nCWDWdZsHnMzWnd0WpSmlIKeN2O5BSwcvr/Wud8d/4IjXYRt0a2GBhFRqwqtFg98Mi5Z1DKk0v2vCs4rfp6BCjgkQ3bkg7i9ZVzStLpTFkrZ2kAK8JtMZiaFIEAMSgoc39wHgwjBZSr+y1QZgY7yln0l73PWHbD3b9R8JxHCxShmJiMeqivgbEGLGscYpHjT2tgQb7ZxxEQ2xqdOFdFbqpnQfMfTtwu5MuexwUDQNKAlFc3Sg0NPwIWz8dsb2IEieApmzF2gz6
iL3uAKwuz1XjNPzbUmFI4700YvuWGULLElByw9M14umJvn/WC5bopx3NODCGsn/m6jQG0cGyqN/vB5wjA3I/MnLuaPXU73CfREeKo1ak2hEzKffSlDmlC+baGO3Ow59+glYYTx4d9SjRu7NBGoLGfu5MBzMNjYVhuGcjANclwtjCAqIq/tVZhuUFj1Wpw70xaua2H3g5DlSwqXpfLwAE0TmURunCcOwfDDMj7NyhhW64+qOT2FKUltx1ciqtA54OEd0xbNA0Q+f3UaTU3JcefAlvb3e8vG7otWH1DhI9vPP0e6xmHsp74kQqCqGtlwNx9zT+1V0vpzOZHT+hZG2+jNFFI9mFtQPbfcf95Ya3r15UB6cCUiNwS0DQJAQT9CgUIcSaC+p+EO6rDXFZIN4BTwu6o+dfG7BvG/B/5+7VcD8+iAHSeZ2dC7COn+WxhCkKXoKbIZxZm8U9q7EvMMXRUwjeHyBEQwTBW8GIx0CHWh93LUanJZbTtYLgQcv4wEjEWGn0rvZmlXs9gHBzOd18RtijHqCzQI0IlMeU7d5Bx5s94+V10z9/vxQpYLpGu+Dhl4awBsRLQO8dYU+IjSwse+MBfnrhdWWgWbx/WvDh3RUfnq94el4R1DNu6JPeiSDGAuudapT6FCo6a1A7dARv02Fh4N7DXNF6Ax8JvdlIO6AxSbFxVT1U4W6iF1rdDIPUyrGFBY2CirPohUH1fljyA1P822qD7QIMfYph52lbh1O4UYyZUOVxMDep1QZRuGHsK/ScYJpolxn6OLD1obXgjqTjqA65NjIAVeflnLqKO4PS6Tj/uifspeB1TySqisHTEx0P0pbw/t2KfFwhH64I0SEGZVRaehQaS1slr++R0AYfLOMtljXCGMH9tuP5acHLyx33LeHT64YjFdSW5jTVyvgMZRpwtnKGH3bdDZSqlIjepov+0+JxvUQ8XRZ8eF5JkrkshHWtQU31N+yIzFjkWRJtfOgQZ5HKmf/Um5qzOofVOwQuiXDbM7Yj4au3G757e0NpHU/rgtboriDQlOg8xO1kegHspm9HUko2r2fvABp3YIsbe5OKvVRYl3GrDXHNuNaGS2OEewNU01W1u+5Aq9i2A9v9wNvrnffhQiPgdQlwzsMBMEMG0Ttq4/vKhRTv4yjKzG10cdBnajRcAFALrz9k0MYb9rcd28sdr1++4u3jK16/fOHva4czu4ENAX5d9DDV5aS1MNHDpDyW0EDviM7ARg//vKI7h2YN9pQpaWmErYwITLBz1WCdodtD6zMC5IoVpOyfMSDjuTpqQzkS7qXg037MqfQSPS654ANEk3utegNaLIYUfluZZVVbQy+EdC2A6NhQrMGh9Y49FyVUCK7Rna4UbVDPafy7esd7oXUcR56T9WNM/PTmk7MpFv37g2IvYpTo0fDpZcOnTzf831/9Cq+3DZ9eX7/W+f7NLlJy/jkJFJqe6YNHzZW05UFpfjBCtUoDJTVc9UtrwOUSEKKf9GTGPhiETm1RRYdXjHeSAaB7BhXbPi5MjfBmnXY9E5JUn74RJT4KpxYqKKQ/iBvOCLKMBTmx6dK56G+TsacXQ7upyXSy6k83DFDHDTS0IgpBWn8aiQLn+3l86bfm9xHCBVELzuKsZgkxkhvgMtlUC9/Ipcu1IvjP92al0SrmdnAn8XY/+HOEqaMlVywhMIDRO6Q1UF9i6Z044Y0xNWoRBoARTR+jRmnnFU5Fr8O0cz8YjJfKsKSCQoBqWdQI45VcptHwaCp4H/A8Y7Ak7WbWoCnFS0BUw9xHZ/FBlZ4PPB4mX42/WI3A10ZZQabbhwO7YhqQDvNVFtZUCg/O1uC1wA3ro/nVzj87+N+VolEQuRAO6kDX3ZrpgyUL5oaVhmzYlBlnaRraqNNKpeI4VKsktGlqdUyLDb19bozM+46FdJBAxk02IdH+/V/nDdjbyRjr7YHZmivSwX1f2hKSwuYCgXUNOWeVqfCLxswVfLLo1CI6JaJUdENdlHM8L5oWqVwqmrAIjfbNqPZr/A7DUshoQznuyzlpNP7CrXeklNEhlJ/o51FrnWSnNRf+6u6cII0RevUJIUgYgyqc+JtOW8722UiKjMwxFQwr/X001U2A6C3iDHVUwsP4fR7Og9N+S06n8we4c0hARjO37wm3+46Pn97w8rrh09vb1zrmv9lFarx0QTsmlXYNKGmBWIOUKkpn3tASaddyXQOG6uPd04rLEvD/fOcD3r9jEOIS/KThjpvNBC7kwxpOfUHvKIWwUdnoC5j6yYgbE8WyBFwuEZdrxPJE9X5YGSdA2EJ3ZIM6XjucAMEKrsHDZA/bKr3KwPzTo3Tc9oLXLaGJIAQPI04X33UesMMfzljh4ppsUz3QaRkUFuDytKABePduVXZh4iL5sQCOaU71V8GwKD0tXiEuO9XqzjtaFHmrkCdo46OQ0HCIvx8Je6ITwpcvd9xTxn3fZ20suWCPAU5ZYbZ3rM6ipQDT+tS8dAC2dZ1aLexwXDEC0wVxDZy+Fo+cCt69ozD09eWO1jr8/aDtVS2K2na1MNJiD9EDkd0tkQ7+c6s7kqCBju+eVjw/r3h6WvHuwxPW5xXrdZlw36Ag0waKp+44cACr7DmajA4D2qJTbSuFzjha4B5ZWwyFpPVXr4TsgCFoVgbj4+6gdyQhzGbHfsOMTDF1mq9kix2uoh+Ehu2RsWwRe2Hce4we4iyOXLEdRa2KgDUSWove4xIcWuEuyilbM0QPWMHlErEunuL23ejgMSalExWYziv1FO3nnezINsSyhQGG96/ecP/0huNtw7EdDDxVaHvsX3Mq2O8HxiA1fRZjgGsd/nlFMYK2i5I2HJxzgDq7HDj4PKgomjtnhjSOnS+1iqLNqZv7q6YR7o9OMqZ1dJMnjLwnknHGM+OcQ6wNMRCqs9Zg1eLuOmgw3ZqKbSmriMaiG2o5j6xGxwI4A4WNA56WMPdxYkkOidEjRpKV9OrPHbseHw/rAzbifL4CljXMFGWAu6hjT/j08Q3f/e5H/N//++v4+HLD69vvh0nq4TU7Uavw18LAuHgJyKUgpYjnpwXOCayTyZZ59+6CdYn49ree6QSx8EAcNJjRvVlDFqF4Mw+n3siaGkt/JbpQ52HZjThrsF4eitQlIKweNtqTeq67iZwK8lGQjqy7A57UxJ4tvHMoHegwyLWrHyHTS1MqdNsWoLexGDvpvaF6PgRjWtMqYAwhwOUS0Drw/v11OiKb1pET46q1RqlGyjxQmGUWY2dkTkgwD9OtDneiuxWrXWguFe04dzvzkK1jygBKzihmJN1mJGXEOQGyNahK5KitwTgGr1lPxtaEIzAYjQKvGUbowHFkAB3vXi9kVNUKIx1HstgPQ+aiMfr7AtIsfQ1Nnc4BDZrXpDY2MZDFGWJAWPRLIyGAU2IwhOUndj+6UmDEzc3JsHeVKHTUAkymn7KtSh87C1pvoQNPy6K+lJ9bbT365I3rMqnnYwLHOd0NpKBU7le7QjkQA787HHuGiMBBdGqi44ZoBy0wCM7hEiOao9ZrFENjLWzHJHk8LQFJ85ro4UfiiRG
ZTt0lFSVlaOLBzvuijBiMXJGPgvvtwL5zQjbC92C9Oz37dDdVc0U2makGuiMWwyRi/3ThPasJAxBRRKLS3icX9FyAUgBDd5eu+qwJ5eoOWHSSGji0dJJ3OhpY1YZLi5mSFmcMqn52HZhQPJBRnYXtBrmSsBWMmQJmaxnuaMSggIf8fEZHc6qEGO7enTpmcIp06tno1AaLZ914rydEDYX4hmA7LAFh5T1vvZsGsrVy154OTrX889Dn73d+fbOLFHEF/dyHypwYuQ/UaCxrmBb0744FIVjE6JQc4fD+/YVOEO+ucI4fzPDumllVelN1cD80rHda7bC5oLQOn7j0b4ZFJXiys4K3pIdfA9ZrRFwD4krx7ojlGHujon6CjO0uyhgjO2eYqaIBWeg1d6j7hLcWORVUa1CFYz6gD4rROIcJiZxfgEJ2ziCuESIG799f1Wm9oudC+rHi8l2vNzCsc9Q4155T42csQnPCAOMhbL1P+jo7e8w91bCumgd37/QoM4bmqzkjpYx0ZDgRpKEPMwJbKoxnOCOLlD3/mZImjMKQg+abE5f07152jKgKa4AjkcpfFc4bTYmDBZoBqpnkg9L6hN9CoEtACJ4u4ktE0NwiY820lqlKBOASup+RFDq8j33aI5zSjbAp0kkIdRSpqq72dPJfPNOmn5YFl0hvNlHYFJAJ9VjDBOqxOB9FC/OtnNMzD0gamY51k4jAbRbHmtWZxE6iwDRorYbhj47BkV2nCj9hdwPngCUGXDTHbY/01xPDyTRYS5gKhNQptCUpo00af+bEvRPGy0fGdt+R90QvQVHnk8XDawNh9bqUXCb7tjVq2kJwEO8RntbZ+EoFmamVBJYmQE8FSCxUovTWrtZBkyylExZdLcz5XI7RdjRS+pzYh2fJGUF52AVTz9nQO30jbTNIxcE4oM0iZVRcDXQxcJ2N8/yczdlcOjt8Rd1pBOwZy+JGkRrRKUpGmTEcer+K0e8RHNGK1cOvHjZwamxgkSraPKQjs0gdGeVIX+uY/0YXqWGHPx4cAHooGfiFZqb9wwU2WPjoEBavIr8K7y2897iqDifGoHh3R9qzFo2HvcE8eDVh0pBU4XUpL85gTQEpVzhnsEQ6THhnERfCTOs1Yr1G+MXDBjsXjbXSZeLt9Y6PH1/x3e99wq9/9YLttiPdNrhOYeYaIiqAAzqF6X9bSkEtBSUbWJxdeQfUbJWH2PhVamns1g1dz601iAsdob/9B94heOrDFiPY7wn7611/DvUZY9kb1WWZh5w5gRlRYWlToZ+ch9eIgt+OhCMV7IkTFA+w4fpMw1vpnTRuIxR/5oJ0JOy3A1IaTG0n1V5lADao1Y1CStYZds8rTVPj4idUEZYAQPCdP/AOz08L3j0v2LeEnAvuG22DuNfRRqVp5lSpSEedMIzVbLF315X7zctCy6s1wAdaK41Mn9NX8jF/p8+CPWQIHTSgHdT4+7Ej5YLbfWMBrxSXoncNGezw1iFcnxCcxR94/4TrumAJAanq+9dre4lxerdF55hUHQO8oxC6NDqFCwBvxq6Tb3BqbVpHr3WSfEQLnngDY7S5Arhw5wiuk2dHrh0mVRjH+2LxpGZLoUM970/g+bLgafEUZZeG7W2fe8pB2++lYt8T0lHw1csd6cjYtoS+J6BUWG0c1ssyP4/1+ToNYWlZNNzOK9zhUJ8iJ4zLimAd2hJRXwkLH9uOIQY/bjsFyqnAOEDs8FAUNOEzx5yr3+L8UkbdkEz03mdi7iX62Zx6pfovgUWD17+hgGQVA6Dont0IEYBBYHBKxnleF2Vx9nlGPj9duOsNpBpxD2zUfYUTfZezqeGuckzcfSIly4Xw3vq8Il5XhEucbjbqusznu3c4CKK1WJxDdl+v/Hyji9Tg6wM4MVX9a7oUdPjoTgPKzkOit8YiFWjZMgSvo9Nterh2lEmjHDRusWcwIUAMt2lHbZ1BqA1Wi9QSPZzXALzB6vOnLuoRa6fILc/l4u2+Y9/pMiHaVQXv0MQAwoU1c5NUXT7xemojusJGAzYDxr2ly2w0GNvO38MYWMdcp2WNKNeC47LQbToRTjFSAXCCGLb/w0ZnEA5GgWodJ1NRIaThbpFKxZbyhPiqdqejw+uqlzcgbd2bB6gsVxxKibUi06JJrEanZy6QWaQ4fYbVzwd7BiPqn86R9We1qC3Ro5SKuDDROGWdVFpXtw8a/R6e2qic6WLhDD/zGLzmgylRwj44BjzsIB7vyXFYTSG4QmalUxBdWsftSEgp4207SGRoHV5ED/WqmU5Gl94OS6AVlzMWuVUld5ywZG8BEGZqXQLzuIxlyrGoxlL0M5aBHguJFBWYHfloh4w+gmIErcskODT9/Yb4s7YO2ArYTEhIOLV5SwLT0xJRKhmCayB9f0xRaS+oIwyxD2G1shZzxbYxdPS+HTClwja6rhuhxiiou/vYl3aMgkhikWkdtjZAGxuzUKcmzqIbo0VZPQMbBd1j8kdjWCbhQFGXEZmSg9/01R/viUFlZ1XnjpD3/ShS3toJmXfhNxg7y+EX2s+jENBnhIw9hy4dFR3kWHBXPtz1Sxvfhw+vNPpDog/GHgZuzkkap0mv0/PTeiaDyzB87ud/OOQVMzPMO2T/+6BIHVtC8vnUDwycBFROi2JPxhm46EhLV+3BYNu54KZLctMDxFijS9lzPB+FBWaI99hh+t7hF494Cdwz9EYWUCCua3VSMSrotN5C3AQoJzMrp4K3G5kv3/3yBb/+1SekPcGWwhTTGHC9rhDnUJyfEMI1eka5AzxE6+iIQF7zfED4sFAxXhTG6fCDeaS+W8saKYwWQdszdmdhalMYsszCzWA57g2cOVl2fFCEU1cHDi1KpXJnkCpFm0dmOm9KGaVw5+XV8SBMGEoQ1T5GAJRScdsTxBhEz+iQAWPMlFoho7ALdWTeWy7mLwlx8dNhPah9kvUWT88relswTFhbpVZtChf1AJnO+gcjNUomfDGW/DEyc2lZI8Liz3tLzsNoCF2ntkYPGLqfEMLdUtFwPE6dRym47TuOlHHbtlmkou4YvMJ11lisIWKNHtd1VX2Wg6lnh44lIlqDuhAWjJ4iU++cxo10HKboHQMEZ2aDVmbxpC4tBgqVx2clxmgAISbtuKiD+NuWWNRrQzgKwubwoZC2H9X70q4rgphJ6vCa3GwqM7Ru5Y4jsTAYnV5jcDgSYe+Pr3fctoS320FvQ2sRr4y4WIJTVxIDo5T5Whu/33AbUVnHmjJi9Hj//oJgKEKvXSNY9sR1YOvKvusgWMDE7l7OPTCgB7ln1pXTiyoiUJcrshGVBJI0taHUNnPYFn+aXg9Wahlu7zizn+oo2vNp5/+M3dMHb/GEBR/6lWa6hk7/nOSAY8/ItStBo6OCz5E1XVmBZxM5WH9WiVI+Ekp10dEX0Vvd0XVObjKQF4dLDHh/ucCB3pNf5/WNLlL76wYPrxoZWtPP3q59LrSD0FBVANUxKH4LmUFcUFKA10U3hX96Oxh2BNrEKcTI7+0NO2bfaKZodRk59iDWCG+M0VljdJr6HgdTKXHnchwHjuNASg
m+NvToYYzgskb4GCHrMjUI3px4s3RSW/mGdbSXzzv5WqpCP5gO57VUZSMpTDCo6RqIyCgMOhlU7dwszgcR8sjBIvOsgbumLRdaEeWC/Tim7idrV00G4khpJWw3HdSVFAFQKFo7jYBbo2N9zZWT1mBtgdZAI/k3eGYXDYZULQzi67HPSWostmEVDoND18ajP9K1K2HgdGSkLcGK4aEb89zfeMemhGF66rs28OjPYOMxmY/P6YxJyLUh1TFtFoqKE3VQKRekXCdpAo3QjPVOl+1mNg7emjnpUoxLIo8VoNBwjg2BtxoDYliAGgXJ/FgFQbPHjDXT8qpAyTzeqTO91b0J7+eiOqxNgzJTKrhtDFmsrcPngpgdnFDIamKgCbQxCrvz+wy4FuMZgaYSV5JvoB53VZ00su6qSqmwXig4tgbwDpaUOHSRGX2TasV9z9oIkADVRbCWgnUNsEaweos48pQUjh1FajDIlUVOC6NM0XFuZ/ZYc2oGrFpG6L8/7onh/5iGFOABYraWRK0GhpB2vcZDUD+QDBE1BBhMIYWnhzNHdA7eCOJDzpMxKnzPbf5uWYkfkKpQX5tUejHqa2o5sY8VNXk8/bOvIakZNlyDvLKEgOfLCoa8/B65oP+X//Jf8E//6T/FL/7iL+JXfuVX8O///b/HX/krf2X+87/xN/4GfvZnf/az/+ZHf/RH8XM/93Pz/3/55Zf4u3/37+I//If/AGMMfvInfxL//J//czw9Pf2u3sv2dsCbAz5UQnZtLCBlEh8G1tvBrnk4LJ/7oJNIMGmkQWB7nxRKHtwyP4xhp4Muczw3jvABCWG6oPdnQiuJAzIFggOGG5RaspU0DiDzq+SsbveEqdYlYFkXhOcrhoME1AbFKL4y4zaMOU0mdWqa9N3Gw9JkZcb5SvU/JhdlFiozbJt0UqldCwselsP6YMjDf98bH76U6TqwpYzbfePhlTOqklKGVdA4sM0DDh+cnT+HwkZ1hqhNDWb7PJS7FtB7KRpT39V+xs0DWTo9/gSMghD/ObljmBRDgNBOo9qmu6jdH3T10IOh5IoS3JxWnRZVp4X9ZMw9ElWgPxPzwR+fD8Wsfe7CjpSRCq2ChkXVEPUOuAvGYLGd/njGaNEeCbtGoUiLbnlQORE0a6Z9k9du2BgKfl1rU8MqELquWDqlNMNDMHdlxyoTbGShDf1XygVHKur/mLEfGfedhIjaO0KxyKWSYl8bFn3fpIA/oBdDa6P3bq0sAkMgawA0a2ZW08gzqjrxNQDdWohzMN5DJWDImU3TdhS8bgdS4XQ/nOqT7ogX7yEXDxMdRBuWitOdpOGc3rs+F6lzp3eM59IYiCcN3Bf69RltSgbcR/F/m4zXrEUKwGwGBxQ8ziMP4IStz/mpT1yOuyDTVRdluTeXoN6D+t+kXJFLn01SUTRmNE4Afx/IyZANAC2c9JkdhtrD89HUBvOorQKm+Dh6j+sSYSFA/z3y7rvdbviTf/JP4m/9rb+Fn/iJn/hN/50f+7Efw7/5N/9m/v8Y42f//K/9tb+GX/mVX8F/+k//CTln/M2/+Tfxd/7O38G/+3f/7nf1XrZPG1wbNkgGfgkcQUc4WO/Y98IOrjYsC7u+6DiK9tpx7AzZ4wJc9RveE06YpqwyIZualXpayNICoAyeMQJrjo12gaMLHUyYR7y46XtoqixH1ZwjYRQDjIFtjZZEll1vjA7rGnkTdSAjoWlnKO0hGVOohzBKoUYDabNqCAtgPtgl6bLV8EHZ7gfutx0vtx3pnrBpyCK7vYKiThjRGLTxO4oKBRUDd0bgOuBM4XvpDa1V1EKWXqmnXx7AQ3o8dIuzaN7NxqC2TpFqVad2lxGcg+kdxXMBm3tHbp2x6HqgBMdkVkaGA2gNi3owusBGBQ5qgslmgzvH4RrPQjLSXGEwnTmsNUoa0G6w99kUjGnd9M57hVgmpzbdgzWnIXZUY4P5TX3+zHNfoZqnxpgNZ9TAtNFXL1jDvKHgcY1hRqw7zQ/T2/P8Gp2ILhEfi7QYwHYyywZV3Sud30aP7h26MThqJ9uvNBRjuLeoHbkU7Cnhu1/dcNsTPr5tLLS5IOt1staiVk//w1RQRRlzltfVK6Hos+dFBCNnbcg6pkN4hxrWdjiN1RkQpB0SCG9howc0b+62H3jbEj7ddny67doQlLlDSrkip0j9n7lgcQ4hBhhrOUVlFcdrkwm993JreNkT9lxwU3mDiOD5OHC9RByl4vmy0AS6PzTQik7kSjg0l6Z7uY4jnw1MqrR0887iEgPWQLRAdLqFMja164FUZuFB9P7nzP8wdfV5ts0pWF0rSq1AgrpU1LleSrViDf7BXkkgB2UD3SU0Mao70ylSr5OI0Ih7qajXFatz8Ob3aJL68R//cfz4j//4b/vvxBjxgz/4g7/pP/tf/+t/4ed+7ufw3/7bf8Of/tN/GgDwL//lv8Rf/st/Gf/sn/0z/PAP//DXfi81F5QjEw54sOwnwUFdGQqx8Vwru+chkNT9QDrypH4byz1VW4m5h+Bm5wOMfctvpHHzoQfpmvK5X960IGI7MbFiRX4wZ5F+Us2jc+ziaodtlQ+c7q9Gpz1yg6aXVhmJnIA3epDqFGSszOV3160n78v+2STXBdiOjNt9x+vrhte3DWnLkw4/vMGGkHRCoWyzzolTyDRyHSys3qHViugYnpaqQWtVJ4c2H1IxxPUNeFg7OxbPfR7Y7eHr8f8XhSpy0egRnTSMCAkaCpXlXOCyRclVJwC1ysL4PLVYmfPzG3JG6yzxem/Ri1V37kHJ5UEJQCEOvb71wS1aoQ/5/vvj4f8PGIeNBqY56PBXk0GGkY5gh2vAmd4aph+iQtNNXSVq1UOQOw+aiz7+7BOCtEr7NiLTBNkGh+49ujHIuc3wUNSO1itEOo5UaCa7H3jbDrzdd2oUczmRCoUFx57CzCIpc3c7dsyi14D9he5woabI4zkbkwJIMCjqosDoeXNOilaRkE6ixa4Q6n0nBJ1zm0jKkjKKt0qy0i2rUUjYWZJVGzHzLjTXLToJbUfGlgre9gO9qzeoKERumZ3WA13sp+vI40ufrQmd9fbg0k/XltYBZ+nMnyvft+scE+1olHSCF31O0CpQBTIYsQpRDzRkOJKUSUQ7maXbCHoUwRqchoQ21CaozaCUBgj3tVZF0y66z5otAJ+RldA6NX1f4/V7spP6+Z//efzAD/wAvvWtb+Ev/IW/gH/8j/8xvv3tbwMAfuEXfgEfPnyYBQoA/uJf/IswxuC//tf/ir/6V//qb/h+Y0czXi8v9OHKe0G2nISM04Vr4EWm0wFwJF3Y5wpjDLUmGuJXcsHbyw37lnB7O04ywrsLYnS4Pi0IkdPXwMKG9T0Pdh2/jcA0g647IIyJ++GgG4VqsOnIfCJLZ4zDwVtcY8SH6xXb04bDeyBnXHyAE8FxZOS+4bVCXS/UPJVPn4ofDVz0MJ5EkeXiVcCKuTsaUBOLJQtcabxBv/z0hq8+vuF7333B61dvqKnC681Pl3O6OqNhBqthwJhQq387XCdo1e+NYI8OwRpsR4K/C162g
/TzXJEVk28KDeRsUSKp4lH3jFYwnRMsaLljhR8L+rD3adMgVhFNSGYkwHZkeOew7+ncT6peyzijLPmGWni4u3N5NEkzPniFaBvQGqxTnzQ9WCYpAnW6I5TEA5pCzhNCGYf055Yy9szuah2LtZoJ1RAM04OzwtNoDRdFBT5cV6yRgthV012Hq36uBffjwJELPt13FuzaCOVZi6fFk90XgKgxCwMGtMbARwvjHdwa0IJHFYN+Iz1/yxXSB+lFcKQDb7cN3/30its94UWdz5tCiE4LqjckBKzRYwkeMTh6Wmo0x4AXre710AnL2yPDq/s8TWx5sBrQKegpOt3REtr3zuGyesTFw3qDVJiufT8OvN03fHq94ZP+LrXhjG4PDsVbtFymLsgow8/UNqw7PiNJjB3ip9uGtz3h421DrURa3u47ni7LNGy+LhHvLnGyI9mcPLpsnHB5LtxLjh0u9HMF+Ke1FlH/foDA44SY0U5RPoxBtxW9VrjqNDGAesuRgZbV5Lc0hUwVnj1y1fdI5woBkC5FmwO+39qBbhK6Iew+WMw8cxvPuk5kxzsHC0Ep/38qUj/2Yz+Gn/iJn8Af+kN/CL/8y7+Mf/gP/yF+/Md/HL/wC78Aay1+9Vd/FT/wAz/w+ZtwDl988QV+9Vd/9Tf9nv/kn/wT/MzP/Mxv+PvUBhXYrgFoqmgf3mBmHDDVQEDhZC11iv/ykfH2smG7H/j08T4x6cttR1w83r+7kKXlHeY2uZOeKdA/BQ87m9OhYt4o+L6u2QCATnxavAZEucSA62XFh+fMKPU9oe07WVzG4HY/UPeM++s+l7aPPlxPF+o7whqwXCKL7Ro4QX52QD7Ajp07l1zo+vDx4xu+970X/Nqvf8Lrpxt6qViMxcV7XLxDVLKBtIZgVYcxxDAqWBwx2qIPjwgnKmuA5SDjzjmL7aBbwZ4LtpxnTATmwSO46MEZlHFZakMw7I4XjecwxsDWsUtpsxvt/dwVDHf5l7cdR2nI5CliqY2EGWUnmqLNTNcoCTci1DXtFgqjQqEepUvPIjV2drpzQx5FKp8ECc2OKon3YckjaLKeEe4i6txvsHiSNOjMUefvd/UO0Vk8rQHRM1XaOcK7g32254KX+4EtZXx1uyvtv01XlNrivCdG7IOx1JoR/nb61xZQcSafI6IQXYkvBh27Hv63+47tyMjqDI/eJxHmung8rxHvrwuenxasMeByXSge9RYuKNnoIcmAP49C7aIHZ8l1oiFVQzfXwOsVg5uRPetC0lEqFW/7jm1LuG1khtZS0GqZe1oD7tas+g4aYs3otaIWkqhaH4iK6tr6SXwoGk2yHwwqLbUoAsBCbUWwOIde+/S49Hrok1hgUW1FcRat0kuwi5zJ0Qp99q7aylJxqJ+fVfsj9lp9iO5gFFkqraMag7KTnOLUbIAOKBrJUbkLzfrXVRvHVCizsMagloJaOGVWY1AMC1gXoBQLkwrEGOSjzESGcjwGe6qsRyHAr/P6/3mR+qmf+qn513/8j/9x/Ik/8SfwR/7IH8HP//zP40d+5Ef+X33Pf/AP/gH+3t/7e/P/v7y84A/+wT84YSqAMJatrNgAprnhcCRmR0aNUE5Uqh97wnbbcb8deH2580ZrHfuemAJbGkWuwZ0YvnAysEYQzIn7N52aegO6eYACgTlZyRCT6PvrTebOarDC1jUiX1fUPSF7j2IND8LW8HYk7K3jY+YCt4O0be8srioehhH46BG1UIUHt4PpnqE3+4T/uswH7PV1w6dPd3z8+Ia3lzt6bcjBwyx04L5YQo9GWYWPUOicPAbpwkDdOTq81T8dXaI7+Peywg+1D+iODQA7b+6UvLVYnJ2LWStGXT3c/JkjF+dcNJ+wZFfYN+WK28aI94IOcTx0fThdDQAl3ojCe8NoE5+r7YdGp3d21jN2QyHUrst1FO0gczmLVK6oqagTSkXJQyjd1D2CcBh1MgYGFs3z0HCmaKPUcY0e0VmsS1ACiZt5SVUPmj0X3PaEW0p42XYcueqhQ4q2E1KDg3MTvDXW6peZTYAMiAicJCfRp1a0VtFbJXy2HdhVqD2KhxHGpwdrcQke1yXgaY24XiKWSNHzMF0ebiEuuklAqbnq85MnazTlesKonULR6C0C6BVpldARgmNxL7wO922fTMn6sOtDp4WTEfpmWqFOiALuU485niFOUH1C1rVVjSopyKkgpTRF6lCEIFiL+xJhRZAWD1F5xWio3SS+VFQjQDeoRmA7v+qAXUCywihUIoLsKgzMLGJonTspPYKO0lAgSCLItdPwWMkuVdGhkR9VSiEjVmHzAQ+LNZoUrWnRle/TmAZU0XvCwhgmTsNZ9CYqQ6B4fQjYz5XB7/z6Paeg/+E//Ifxne98B7/0S7+EH/mRH8EP/uAP4td+7dc++3dKKfjyyy9/yz1WjPE3kC8ACvxaGbcL1KwTpIMHLnyNM8hLxXI4jZHuLFD3A9ttx/a64/a24+XTHbsuef1HQg8vH19JsXWOVkMKy1zUDPP9dYXXXUANDtZztHLdTpbSb+wWdOoQLuwFBi5YtOqxXBfUzPHct85pyhps245tP3C77XjLFV9umT5qEDxdaH9joqfB6crOdLlG+DVMSKs/WNqMfRgGw0yoa0pHxtvrhu22Y98OpJTRagdKQwAQjeBpDVOXI1BIojY4kDQijnhLN2ZORWItLASLQmZxCViXiCMVXK/3uWR/2w+UWmlauy74cL3giyfGphjIgwh2IpyDdwABEF2BQVd4pE+H96BFetsPvG07NSvB4sPtjst1wf3+rK4Z5z4prp72RktAVEKOV8LNgGhEMA+wXtoUQ3bh3mDGhLQG3M+d5nHbUVJBuiey9nLF65Y1U2kwuxS+tYLFmVkQL6nMdOBrdLpEV9aYMfoc8LDcDwpb3/YDtyPh9X4gFWZKWWNQvMPiLJ50gjsbpjNCBoYU6Fo6aiuoAFAbD8DWUFPSaJeEPSXcth3Hzr/XWoczpMZfl4inJeDbzxd86/0V33p/xYcPTxSPX+LcnbJIEY41hpNN7R0lAbcj49PrhvvtwNstzQKweoPgDK5LmD5yxhNePWrFy33Hly9v+LVf/4htO5DuB9BIUnqOfu5EvXPwzuP9yve6OAuLTmPnrHj5bEDOhNqkjcaA3gdm0ZW1N8xsU8rYtwMOwL56SG8KnxuYTkPp7g1QHaRWOFNhBfCaImBLw+D3WdGYeC1IrTbkrpA46zbfa+UzczQgo+PoNHoOwdGdRtgwjr1tUXhWwCRugPloViepYAQWhLGNYp0iD+YEujtOe5p2aPvtQN4T0v1AzbSzMlyI/fbFQ1+/50Xq//yf/4Pvfe97+KEf+iEAwJ/9s38WHz9+xC/+4i/iT/2pPwUA+M//+T+jtYY/82f+zP+7H3Lu8CdjaTxsUIzdGnawrRD6GafcSEbtpaJlevw1Hd0NOlxgzEfDuZ+oOSAnjyAGNbAbc63D1TZdjp23nzcKHRPig+gCHOd7HlMaDSIZ0mhqQ3P2M1ye9PfBtuEEMdwogoYpcrqTSRWHgHY+OJfXIkqJl5Oc0MYU
MBf8ZuokSLCo6gjOV6sNFRXiqDHxwtX+mBg6tIh00oFHR84dj4EPFc2o6asziJtDqdQ+XWPAu8uK62VBsHZ28OyeuVTOtcMqmUS8BVPAO1K1XDLrwe2HmWhr2HUJbDMPxayEE2u4c+ud5I3Lzhyrrt2oD16p51ANmQFg0RLtsbrgFI7p7o9EnjZ3h4PIUMdeRQvc7MAL2ZNNPzRnz+vJv6OSBlBywPwwO9N1x+cIYH6e043g+7+0Wxl7Uvsw0Q/4erArO/EllEbYqGsnbHSalNZmdz2KvNFGzJlBBlInDIUovWMGmg9sCI0z87qKavR05D/hrUHM2A7ctjSF+QZeDVLV+84zWr4ByEfFfiS83Xa83tl8tVwmgzZaNxtJFilHWNsxW0u0CAwCBeY1HHKMPkXOrWNO+c4aZT6SEDVR8d6B3j4j1Mg4vsbkj/Nz42ekTg1uPFfqu2dYnJ0Wkenar7vbplB+qxQrl04fv1oKsgBHyoDwGd1TxpEpYO/6e3g7hLufm0kbkPFaTINIhW9dd8Ci5xzdOACSo9KekI+sCQL1nKB+ryapt7c3/NIv/dL8///7f/9v/I//8T/wxRdf4IsvvsDP/MzP4Cd/8ifxgz/4g/jlX/5l/P2///fxR//oH8WP/uiPAgD+2B/7Y/ixH/sx/O2//bfxr//1v0bOGT/90z+Nn/qpn/pdMft+s9dIuqR+g24S3gC9OdTiUBUXRa7I+rDLfNA6bU9SQepkzOQjqyDXousoLdZgXwKW6GFKRdDMIKd2K0+tY238/7712dFgHOAY5Io+p5r5pawy41hspFQ6MBOd5vJR9SFGaemX4HBdA54uEZeV8IkIzVKPLU8tg4GmDg9xphGN7zB6GPQpErXm1DQ003TvQjeGnAunJHCS7abiYgQRQIRnRIbwgKiNtNnhau50sonWYF05/17fXXDkjPtOmKi2pgnAXOhHdWjGJKsodFCJlU/NGnteOmlXhlIO/7EKdo9HYaZNVUj4OBKct/j08YWHnca/O2vwrecnPD2tqMczD6m16WGqU4YImq+cKoQx33PKsmauJKejxIiSaGowq3BVyU2tngoOJSOMPabVvZ7v54K6awMwXcQHHDcm9vMsBTD5O2QKKltusAadJQMuqAOHj07tbYwWPi12hXug3EDH9VTY6YMREY0nNqR1mK7sUtWsBmWqPkePp8jIjugtGyp1KvCXMIW7xqng3ajXny7wcybr9NPrHS8vO15vOwDMCXjAUVYFxg28X7c94fVtw/c+vuLLT284jswJKnhcFo93MSCoINxZUvcva0QIFL9agKe4JnoDmNNUVTZpHk1Hp34oNoc1eCX0qPmw/oxz11XRi0GzdULSA0IrteAoQzrDxtZZmvEOBuJgLQaFDOnlyfdXdGdEbz823LmfkHwrFRUdt00YY1QrXraT5UjbIo+nJcy92ck4VZp+qlMAHGKDbZ1m1uO5v6e5Cy77wTy0NEJDT27Z13n9rovUf//v/x1//s//+fn/x67or//1v45/9a/+Ff7n//yf+Nmf/Vl8/PgRP/zDP4y/9Jf+Ev7RP/pHn8F1//bf/lv89E//NH7kR35kinn/xb/4F7/bt3KK2dTNwXoVUmpXZqxAHEkKxtLjzhhBPeju4Pzo4MaOhZ0CNCIDVZlataNpkYJR/Uup+CRd9VBOMXRHmm9Z5/sAAIidLLihRzHmdD/PR8Gx07ePSaRlLoibdrtWBKt3tDqa1GlDR2fv0C0d0EvvSIkOC1b3IGKYswVg7ljGtZtDaAekDyEtWVjNNVTtAr01cNpB1dqIcYugC61wOoCnuihVWA8XZQxWfahd4EJ8WcPUkA0BYSkFuZQJ2ToxcMaSvacsuZJJc0XP6KIwGvR6aqEN3SJ2p6p/drl7aUj6fQYdq/WGbTuAreHjp0JLqdY08p2mpgLBEiOWC4PxYqkTAjOeE4hfqLvqnUzCDgDGnpPMA5liBPRNWBDAA4lQJ2OZn5lANWLqMtF7R8uFUgWdUtCVcq4TEe+34ZHGCPtrDBoz33AUh1wKiTrB44v3T3j3fMH1aUVcI+GyB7JIVZuenCuS7swOte95JLg4ETRjUYfreu/TCmf1Ds8L7ZqcduW997OBshqtYs9gQGqSiFsNL81JMsn0koQAYvU6GhWe61evLPZj8h6wlBkid0PrHuPcGQuihJwReHpO7xWtnvqtgcIMmjiDTvn5RUfHkrJGhdGrOjS4BzcQO/02Sy7oyig9Usb9yHjbEz7dNmSFkINziMHhsgR4Z9XRhA1K8H7SukdTdK8ZpQi2VnHLjMZokHOPCwP0hm3fseWCl+PA655QaoO3Dm5Z4K3BGr36QPLsEGAKjXOt0x+0tA6rzzvTEzi9Fo2eRymQ2mAbDYRFJ1SZUcu//et3XaT+3J/7c5+TAr7v9R//43/8Hb/HF1988bsW7v5mr2H3T4t7mYr1wSqZ1O8ptuCFHtoPseYkEcwOE/R+01a4VV7MYRiJJtSHtIab4skwGlsenMJtBtdrRFzCg+aDRQDgKrzr/qPVjpx12XrQFilnpVE3XcxD2V5Kg4WT2R5bFUFWaJxCZWfOqWPoUDSMzQ4eukxiCfTGlj5MY+kFN7zCSDPtM86hKb17ywUFZBzFRFv/oXkinHQazTbFymENTLB0JI8UYPcJNbbzAB/dVhc9IDqMzRDD/JlSCDcNWGNMHiMnST9qmMLdSXpYIM8OrncVFRds+526rVZxjRFrCHh/vSCGgHQUpFRhveqLFNob04tVOUPTg6o3TjvoGObvE+r9HG7DZwXKKORmrSjGPyYZqDkrdzwtVzgBuhHkQsZYrR2w3E+Mad0Y0cO3YVUJQmuVbg/VzUTq5+uC6yViXRkpMmJMoESQ8bnkKeju02aqtzZpyNYMOIjwI0Cd2+IcFvXOi25oZ3i4TxKKmRdBZSD63OrzcsLNfcKKDBMEujm1iZPoYQ1ab/NzHp52NCSmW78YFbALf6aVMz7EQKa04XGvAxGNqxio3YDHSagRAbwzaJ2NTqldd4X8LOxwAtECOBJrRzM6tVvHgbc9IZeRTXXeH94ZLEENpo1RRxA+r2PyNMagCX0z7zljTwkACyjZhLz2KWfcj4SX+x1vR0ZrHU/Lit7b9EVcAoXigEKc7XTIGE67dRSoqs9x67jf6W2Z98x4+96VkKRPoPTfto48vr7R3n2jqthA1wkm3nr4xZ0+eTqid9A6Bk3QrUFBx9ErXlPG65FwyxlVdz3OaDiZPOyNBnSDjpoZPnfsjDmvEHVVpzdgLQXBKXZ74XsaJA7vHaxtEJC1VFLB9rbj/rrj48cbTXO3jJYSkAskESN2lnb7i5CAsJeG1BpuOWNrDTsRFxyZi3V2becSvA/oEbroNOe1Qe+wlg/AJXqU6JGDg60N3Rr47rFYocNEpbD3yBW5E4u/BEd/s9G1dt1zWEam2A7AAMvzgvUScflwxbKyG5wePMA0Xq3lAbNXNtDwzNtvBzalbL+louGQjdi8MbgEPrxOBEX0yCaLAaZ3BEsRbgUnuJoz9vt9eggGEXgxOI6K+5bxcju
AcKfNjTeadQTEGCjeDg62K91d8nTmL+rHWMtISD793uaeSZsbsQZPQn/CpQ4aCFSI25AK9zGlNvRSdGrhTqW3xs68mpnyPCQYvNcMQmAD8a3nde5OQmCk/dPzBetloUO49xPR6qWBQWIkgTS1JEoqVRiR8AZ0vliUKegGuiGCJXBqCNbC644plQpzFMAlatacQSwkCEB/L8aECOHk0gipdhIdnBg1j2XBicFijQ7Lwt/HqsGpFUKRS/C4LAHPS0RKzF7rjZqtBuCuE+IuZaIItXeEpllK2mmZDhX0GkJ/pVIg28iztXO35yFiUDqneBpXn7tq+gPSaoghjtyTjvia+5Fx2xNe9+NBF8Xm7UkJJkGhd2kdxQgcVJ/Izg650Xrs47bjy7c77tsGA5BgZTq8W2DFagL3gbfbHfdcAAiedYqKkU3MEvg1bZ+SQTMPYnXdtxYj2LedJJFS8fa2zyTpqPcEvB3kxLni+Dqvb3aRGiQJpa46tYqfi9/xxOlrOE1k1Rfc94S3PeFtO3A/Eh+IRsdoM/ZD/EHnQTq/D92IG7qig7ziaedhmu4JaU38PkJxoe0O1jB6/YzC5oecc8FxFGx7ZkTHzjHZ5YKurKGxj2hgAF2qFccU6yXsh6dALwcy1eyjk8Gp7hedrobHIQx3RVYV+txTGPRqgWoYc2Bo18R9XZ/XYSx3H5YwmL5rGjGveCWWS0TU7BkXPeHQ0UToZ2O0w55sSktLHBjSbm1paEaQe8ctFbXcKYhOnbRFEDyU1fgwqcjwtuOBUsWi6HRJiIbtcbAe1jrURjx/OzLcltAALFtkoxE1RtyaWWxkKpvP3QK95OqEYcZU2brS5BXqgjFYQBq+r302RFnlFUnpwFWnCAgdPajP0YPfULfXtVE1Xf+0BtF6dAECAj8mGe4ZDmEJcMGfbL4xvpmOPiQTDwsE0RlZvz1EDGDYJJimE43eW8EOSJn7Q0hD7g1dm8ukDgW1NL0fCYeaJsx0rLx+3PWNyZCBokvh5LMEQmFh6KrG79E7uicFfdH9034ENQZuKqTltcsA9tZmkeIK1M+/JlIvMEbv70oxt+nQwkkJyvhchx8l65s2Xm04R/TpjjI8/u5HVkPexGlKfRoJ9dtJEc+qVzKHnWa4rrY5HRn97IbO6cgVR+F+y2ijm2vFiE+Zp2Pvet6ZyY70nuiIU2SKJBVFqno/vSFFKWCtoeczXwxD8A6iU+N6zwqlP/frvL7RRaqD3Y31qlhfPdyiqZBW5iE1yAutkhK8bQde3zZ8+emG7358w9vrHS9vO4xCXqu6SvtRqEQ+u1Ck17K7Hcw1bWKQtgO7s9heN0Rn0TJJBz46BL1RRATN9rmjYLRyIV34tuP1bUPeE6RUxJrge4NFRxYgQ7ADuJdMh/FUIEK9UXR0KEiXyH1FoHqfeS8WzpO9NeJDxA74kV52vhAGisExy0dvqMUYOh90PmyiRWMIkggiDlyPX9YxEsFEB6NygFV1W/EaOUEYmcWJOTyYESMiXZ0TeM1d9OhGyFAygr1VfHnfsB8ZRypYnMOqndoTPET8LE4jPbg36q96p0bNtIBgANv5GYkInGNqa23AdhS0tx0FgiUV2MCIB+cJ62pWL0aEy4A3x+4sp6KsSr4GLJLbMPnVCG8jMzpk7LBq69i3A0DHfVfSSinqIShwoMDXgFZWMIYWTcL34xUaCt4iRDtd7cUYfa9aXJ2B8567IGcfHq7+IMY+jZLP5TkIN+vujlY56oA9G6Ph7F6xlTqchHCtRAEu9x3i6FIvCte3XBUSUj2OQrtWmIl0iR69VDio28Li8XRdcNF7y/uHnDUruF6i+lNmoHbsR8Z+lHnwbylrblih36Q1yK3hGiqfocbrMPKQoIxH6R1OYe3FGDW05SqgtErEZeiOagXAqTJXUskBOvrfU6bJbS7YjqQGs0UbX8K/w0tvO5ImC1TUTqaeV1ZvyZFUcRFsKWE7MvZcsOeMPWUIOlOnSyQsD/WSFIq/vWXDtKpryaoekMMhf6QScyLWPkabX9c7mcg9qxyjcv8E/jve2M/2fDj5jF/r9Y0vUhBMZfyjI/PJ2x+YNino+SjY7ztubxteX++43Xfct4QjF7X+EXjLiuO07RQ9TAT8XoSRTqpo0/G7ZGFwoT9we9u4pylVhbsM1uuLV3aLzN9h5AkdKrx8ue/YbxukVlxqQZAOh44NggTgDYykyPrUG3NGgFvRKBJvpgbFLwHr88IOSSeYQQUfA1ATRpPfc8E98Utq4yHk+G5JPNEdoDHoFmqh46Zf3GiWhiBzebcwidhb/qmL7VIbWqaeohYauDYtcoKTDu21mFlrYLOHCQXNCopwV7OrDxvdr1lghyjUGwvrmU8FDHPTIT9oiM6gVmpihgN57QYNjKTotaEfGaIQ0Hrf4bzFugasa1Da80mAGcUppcLsqVw1YJJTVCq0t9kPugQYER4EltlMRrhXajo1lVJhcmGabSmkCOeKbkgQKpYM1R10JKgoaDq5e0fIZjUCYwPZatc4IexDIyVQO2Ab0AQGp3M+Jz1+iQWsE4Tu5mREWBazQeFhxZtpuHznRoTg7ch42Wn8KwLsteKoFU9vLFLvMokcGGsqhV5b5b7HWjtTlS9roAWY+myua8DTdWHy7oNOqrcGWy3WddEcL0J2+57xZg9sB/cxw23+tu2zqem941gyghtaR/ZkXYvvmCK9KP8CvGcaOJGVIjPfbUyDVejvVyqhMYDuIXuiZVcq/GvaB/EaTsKSPtetNKRGt5LcGGBI6Qk1T2yuZWZSNQyyiIU0TuVzf6SfBcMVGQzpnMP1suCyLliXwP26tZPOXvViGGsQgkxfyaAyGQN6KVptiLo2/U7MFMtzMuXz+DnO9Vu/vtFFamwrR1ruo75D/+mEoibDSgMG05GRdpqnTmt8nbzGQACcrKvhrDD+3oT/+lgkCqSqpUgq2I+EZTtgBFgOsqZao/fb+E6PH1LvQ0mvIsw9QWqFtIpmOrwAe+9IAA5wD9a1aAwDUjcOc0fSgwsOfvEIq0fQAuFGbL15YIN1whKlNe2+KCq1vXH/0e206Z9LajGwFjB97CGUlDF6JUNboeF+4SL3hkROhZNG5j6uloaWdJrRAuW8RYemfUIZnLrXG+4fHJBPU0wxgqNUPsCtw1n+LGtpDAt92MlkJLzTOuGSYW2zF4bYFZyL7VwqTDYzlC5rXEStDRbnLu5U7tcZmDioybWpYFI93nrH/KwsWNStcCJptcFUdc83I513TFK0/qpGJoOyV3bWWYuriCB3w/2ONYidh5FymHmPl8ZC3Mi6smpQqnbZ8/kZdl7WWTIezXnYtjbMfwePZ0SY8xgannBHKbgn5jYpdgZjBPuREA+GhQ5HlKEfPIkmY7/JvVYIjuQRI0xVXii7COpQMcJMezcwptJxIgdcloJ6RFiYCcPuhwqfK2GxYTAcdSLf1jx1XdVwgpV5AGDuoQRGIzyA2vXwBglHQ+ZyLmH6XBdUvbey/lla41MtqtvTCcdbsicHgSSVpvtgTLj90ODT3s3cYQFQUhnNNkdK8Dzfxi
Sk9573XtOlnWrNeP+xeWt0hVGol2eOnjtWTjivayad3k+juR0nwzz7BJ+dqb/d6xtdpESZM+ycTwsXAA97kgcWTtVFrLK+TKNTghdBdW5OI9E7eFVXj4mM1Og+9wvA6CIxqZe5Ntht13GaH0KtDcslwkU3GUAYeyEZFjwqPKwNR87YdtrLSKm8/a1ALDQtkwePt4xpDoGWOE9LxBfvL3i+Lvjw4YrLdcHzhyvW53VOVKOAD6GgiMyD+LYdeHnZ8N3vveLl0x0vn24wndZF5VJxCfTug3BBH7zTBT0QA2EWCHQiAR8AR+JEuJDNZywhqmPL+PjlK+5vOz599xU9V6C0c5cRg6YdR6CDUOnidaK1iKpNW9eoVkJ1mo2OALu9NHQ5hZKDWt2rmXBa0/+mG7qRFClAU1LA4402F8QNNdPypaaCZgS1WpSDPpBpT9oAkVxCCxjVdOWCtz3TZf4oSmzQ3YIRPJkF1jOYsJSKngVZujK06MGXdD9pe0c1gtoqhyAAqXUcreMYDEkR7mMOh80Al1JQgpvU5TTo8LXB2Y5m++eHCNSayut1Cx5h7FasoO4H9pRwHIyKcaDtz+KYtAvgdAZPZQZdsqBxYrvddgTvcWwZIdDhw3iL2S0aAaTDwMHWjhAD1pi5t2yEXRd1VwlLQFxoC2W9VeKNoEZPDVdpQK7wYlCOTAmKgMGASgKpylR72xJ6B162gwQkpaijn03OWFOLTjzjb3ejxKzoUFePYDq2RKHwEiwuuvPtCpM/MggGLB2cwerp1OG8Uw0Xm8vSOvbpTwn0JqgajAl0uGpxaAPivWNQqjVoKVHUrmeNqvHgnWO+k3cIweP5+YL1wjTj1Dp6LZNxnEdMjwCrc7CGxBynOioZW4AGGNh55kLNZfVYmPvqr1mjvtlFatDOH4MFAfBQ0eo9VdvALApOBYxr9Fijn5RnUphVKGeGoPEE5h4L1OioOlTVDQBNsKfC3cZhcU0RPhR1fuizK52RBd+3lB77mbOrHIFhqgBXeu4McnMnA+fddcG75xXPTyuuzyvWy4LluiAOarF3yjjvkNrnD51uy4nx1dzxMBROWke1gs2RuRWMOeFFa6dPIkSmLqnXhpYLXCmwxT9QjU/ngOPIuL3teHvZ8PrxRiPM2qfz9hBLQligOqDdMYXGMXisS8B1DaitMs6gVKUYnwWfbmV6r8gpiK06tQ5n6aSTz+hqqxZwo5ONs0PVP1JQMRfoTX+nQWoYrguffzVl/dUJ4w2Cw/ismWHFQ6n2DlQeSGXsr8DpFeYhKHJ8jfNc70Xqwypyq0i1AN4itQa36K5Bgx8HXDlJNQ+3ZB9UbX/C6qbRD65vB3JvuB0Jt/sduRR4GKzeoYUACYRCmx76n+nCMODtfvrEqQau1kooWkkBA0rPGrFScp20d6Zfs1ly7vRXHNViPvf9LLhWRKEzmdlT0VnUSjf50gSl9skdoUP4cJTA1PiM1whALDqa8FPqcEZUGxaYhuto/EuBOrOYSm1IphEK0/ckKpyN3uKykAQVPOOCxOiEpBOR0VSGMY3QfLhM9mVpDcYaLCEgGE7V3D1ZEsOERBRokyzekkBj6P2YSp1+krvuyoqaJVsxcAuJMeOknVOR/jEII/N5UAjTPiBdX7dKfbOLlD7U08oFADCWt7PhmS+jsEEMxLafrwveXxcEY3CTweMTRGWJWXnQS0DUUBLfp5jmgVB6n3qgqvjH5RLoy6dFknCkmaQOPBjO8secOqPxM4xw3+SdgfUe1egU5T3EWjxdI9Yl4IsPT/jiW094el7x4dvPiGvE+rzqDorEBRbBShq+ajuGA0I6EvaNtOBtzzgOqsOzISXbqUtzHPoMZ1Gb7icEKB3YSyHs0xrkyJDFzRyq3rkXKKnifjvw8eMNL1++4eW7rzCtI+gE65ydzhKtNThv559d8e6LLsM/vL9AeQDIiQXJqcHqXugRNhqU6JjlAwyBbCGNvjbs6rB95gcxTG5Y7ITgNBfrXACjNXXgJs22KgQ4NDykaH9fodLpKueRzwNNW+1TwuBXj9QrehHuHRVCNCC0IsCEV82AXo3CS406lVK5kO8H78O9VqxbhDiLD++ubDJEKNGYBVjdKEYB1KYAuiiH4cGKUlBfga1WfHm/46uPLzhSQhDSl+vlCoPhsH7mfQ2YFzJWgn3uiDmBBuQc4LwDE3RFxeAd9y0h7wn7dqAXUq+DMvqiarvsyExTCHPYnaFWTku9wXbMUNFoDVZncQ0OBh21kV5fGtm3gGjEfMNRGw0nNMEAerakTm3Qkcs8D6IVlYsEeCsoNZCBq7ufoKngh2a0RUtdYrcdHdQmrdHj+RIRNYutgwX7ngoqKqytw7AGI+6jlIKjsAHLlZOWU12U7R1tPxOkveW+do0eEcCydnTL5AJrLWoDqfBbUlPmXRuKqobEls+rNeja/M6gyna6TlA+UWc6wGj4xA724++DIuUDqcBjH0IdCbsl85lQlzuNoWO6vlunzqDlyhTalx0pldlBtNbIntEH2slDJ6h/WhBvHrh2bV0xW7LbSu/oRnVc0cFHLvLNKFIKRY5OfHSJ3hggONhmcDUdqzNYLAPdijHID6GGl3XB09OCb33rGR++/Yyn5wuePlzho0dYSZmedHw9gCmK7DNTqx5ZH37qXqweVmMn0fFAbbYnTEkEpeFtz7C2wpZK5b81SAY4WkW4BLqNK+trvyfc33bcXzfc3nbse4LrgFgLP/ZMmYvmLMAeuAQPSviw1uD5eYH3Br03vHtb8XbbcWyJ+5/SkDNZVVtSF2pQAOyUaFAatSn3g4bCW84qnO4IxszJKXqLdaHtVIwe19VjjU41cGMK7UiZkoYjjdiNNq81GWEs6kv0nL51ZPHB4WkNuKyBzLTglMwip1tFIz3YWQqrXWPYYdTvFx07YF8bjGp+egc2ZLUTUkurUvC6RCzW4uIdIR1LSyGvNOO5f7JDTCzafJ2sxNRZ4O/HgZf7HV+9vmLfD3brpSBAcHEO8MMDUkW0EBjpcx/SWseeKsKRCfstnPi5rKcl0r4f2PeEr379I9L9QPp0g+8dXgyWOQ3yGeyloR6ZcLjQkbvXhpLKpEQb6XCWZJXWI4uNDMNVjz1T0lFbn16AtXWFjysqgOjNPAO2yqnldTvQlJDzHD3TkrXYCDxaH2LcMzXBAXPCzq1hLwZ7rVOYXxTVidYRku8Nu4ZWtq52UDp5dZCkcqSz8bJGNK3ZYbEWEgLF2aXhombUlzWiCZA6cK/MlHu9HQMmwtudOWS37Zi6uKfgYbsHVCxtzUCziJTQZqnjdvAs3fc8Rf7R0Q6rW4sgmib8NV7f6CJljZ0PF8DDV0xHNR2iAOmo8iKYprNx8VgvETUV7Led9NncJlxyDI+8TkbQfIBBTcQM3zNn8N9wTeCy2NIfTrtV5+3Mt3n0WZukC+2wuy5OrRGIs3DdIBpGZFgjqOP37HxvAwYLwWNZI7+UzecCRY1m4DcYTEKoaFanFfWRQ6Uyn90Ou7uGpgyeQT9mBz/cNwbUUXPljq/qNbcG5s6ff78ftEFSLVY+CvJB9
lvNhV0WRhYXtHArNJaFgmdHKG08DDEyI6i8vxBGCQ5bTLrvSrjdE2G7qsp4nUrHHm4srFMuOHLBrsGE6J0NBUR90eiUsESHGFWPo/HlI2F20IyTaraKRhIMl4MBzThrET2vW1dszo/vrTlQduqUMAXIA5YzhsJSJ236qY2YluC4a6idLg+2EgHo/WxEshHkI9FAuVTum7SzHSm84zM+3VhEjXC7Jvu280up0ilnpJwAY5Gy12atorWpMJys0yYyCUmtg/lFmvOVEklMIyiwd07HaU+4v96R7jvS6w2rJVV+BExCyQSQkYjAa9u0SPHPE843hrtAUsYJp3rvqDnKFU49FKGf3aDQp1pJvHFK7hCgNHpT3jNJQOP5XdUCKXo7CRAz205k7jeDs2rDRWlDVoi7gsiE1T1OBdGZUmmq3DohX29pe9XQ0TOvRVPfSsaOCILaUlljOPnbgsV7woqOwnb0jq2yCKWSJ7nivjPSZNvTdN5v1p7GvuN+0TVEaZTjDP3XkSu2XKYtWuu8B4KerWXcDL/D65tdpBTm6+BeoB8FpjY6JUQNgLN2eqjakQ30boVV2w8BcH/d6dzbO24p4bbTdwqt4Ro8VmfZyYvAGxpots7sGStAqhm1V/QqgAVpz4vD+hRxfV5xfX/B+rTQecJz3O2N9jLblnB7IyX+2AmxRWcQ/YIA4J3pkFqAWrDnhK0Bn1qHWwKV5wK6QqwB4cJ4DhedJsFCbYbOA6uVhpIyaqpIW5rZWrY0LDD44rIidCCAmh0jgusS8XyJuK4RzliUWtF38AE9iH93KNNJu/IRbe/VJDXvGcsSkPaMnjIcOqI1QPAKwZw7QABnRVX7FdG9jQ9c8LbesKye7z9X7FvCsWd8+b034MtX0vPvutVTWAadsNlYkJfWlKnGnm46PXsGSF6vC96/u+BJE5qfnxc2BAtNPXvvOI6Mt7cdn17ueHndUAqhmGEquqrINC4GF7C5afxhsM4wCypw/4Dez2C40ujUrROdaQ1NOoKQnHDVvUVUQ1UpFd0I/GHhqobUaXFwoFu8KQWSC7ruGy0I/RjNXHP6Xv0a5oL9eGWA4afXO/Y9IyXmjrXa506nK/zjjSYoD8ga1MhcvOPE2Rr22gADFHTcjowugvjpDTawkVuXgO44Daa3HdvLDffvfkK+b6ivG2zwMNEjXy4wIijRwTQ2f/3hZzdNMBaFU60z8IuHeIvmHGypCLkiXhbkUrHuCVvK2A5ORkWNWY/aUPfE9+kdnkUmFDxE9feU8HbfcaSCl7vBU/TYy4pvXVZcgsMlaOKyOfOeom+ABIgxSABsLjjuzDo7jgLgQPCkNwwh8D1llKoO9IHv590lUrt3pBm3IUWnHMH0blwXQ4JIqeqYzgl6MGHbwf/2ljM1WB3UZRX1ORVRMpnB4qz+XpwW6QsqKClzKswFr3smWrEnFil0LM5i6R3GOVRFvr7O6xtdpIaJLKDL2HKqqQdGbRtv3plGO+i0wSGszArKuZJVZM7OqSj85ouhorxxavMKS3X9XsYKijTAWRy1QTxdlN+/v+Ld+yue3l0YPjg0QroXaU078CPjvu24bweORKt8o7hvFEGQhtorSuUy82gNR2VHA0vaagMetuh6vuvfnymxuhdpQ5NUKsqRSQU/MlCoDr8ET32UQjJGBOvicVmGwzrQMzSJmEatudJOynSZ4tZaSMbY7we2t4N6q8owPmm0bWoxwGr8uu0nCWbATkOTMTRTVm2eRADTuTm2zsKXpo4ZFtuWEe87/F29ATvQ0eahzQXvgErYwcbOyduKYF0CLpGFaF1IrFkimWdMyrVTM9caAzS3nVEQn942xnWDUEsPHtGTdOI9G4dB6x0mw4NtNyaCqrsU6dQCrc7iCB5Hb2gF9EETgK4fpAaLMtPIdOTXTBGGRmVYGs46oShblNAy6N3WmVmkrLeckGvDXWMuvvp0U9Yi7ZmMEaxLxGVdWNxbowsHZMZXDOqxNdxnShM0YxTuNCiN0STbnrDtCcfOSW/sYqHMO1MrTGFCrlQDZIOaEqo3qEnvSV25DJi06l5m7O3EMdMMtsNBAGshrnInVxugOxkxGak2QApSYTBl0glnTF0wgjCW/xBltLHZeat9hj1aCFoLcGLQ9b6VsfsWEhnokengFA4HTumDMbrzxCi+UFLI+MkqY4Bw4g+eDEowxWD4JQbvEI0A1TywEvXPTpcXK6dRMPVqHRaEprnDEkTDyW9x1PZZe8KXU2eoO9THr6qNQxKBrTwvfOPn/3Ve3+wipZMUoDen7lV6NydLyKsRKXBi7t7CNyr8wxrgcyGEYIjBZ90x9drhpMCiowZ+UEEhQzGagFs9THAIKeNoDRIcLtcV3/rWM774zjt861vPuDwviBd1WdAdz0gI3rcDb28bXt820s4bqanROyxGENGxVzpbHCVjrw9FylmU2pX1c7KwBh27lcdFPqZZ5oiKyEdGVhsn5ArXgeclIhqDi/dojamfMXhE3Zm0RnqzcbzhG9QRWWFRiCErsDT1JTxw85tS/pUZ2YDVe/iV4uQhtpSHz+gxE8yp5ZVz1MKMl1XLpN6ZNOq8w7ZlLLcdISZ6A3baq01fOaG9TVcoZ/wco7uZ5yVgjZ6TlKbHXpbAsMbhPm3MXMwfe8LttuHTyw1ffbwhlwZnDPIa0VrHdQ2A3ithCTM7iefRyXobMR45M3NHWsPqLBA82lqxoSIbAKXDCo2Ha6uQCnQxbGBKnYLhIYOwhkSgxbPgeRHYPk66Pp8H5x189Gox5simKx2vtx0fX274te+9kN3VOhZLCPj5uqLkzFTfVBCcg4hRaIpUZeh1X7yjQ0PrCl+RPXfkirf7juu24L4dyLkQHrOAqR2mdjhtsEzrsKVCQAp8tYKy08QZ84CnGS4Te0nDhqNDOiyLszeG36s2uOBJofcO1mdYl0i6AdC2A4e685MpWad+aDDUaMrMqbpoNMjmLFIu3H/WimgNWvdasEcvqVCZJWRvFbXo/SQdGNOmAHpA1uM1CpYTbeKsYM90ujDaTF9iwBLpYh5FILbTbmw65HKdYVpDAH0TsxkGtIzWGXBtsIb7zECG4uIp2RExavU1YkZGynTV1QcRC0LtgC2G17FaJQ39zq9vdJGaPl36yXeFhlquaKmidkF1BuiGN+poIYC5OxGNqx52MYN6LCLT0ZeYqna+XhmFo7voQLwseA9Cb+4acbmu+PDFM37gh76Fp+cVl6cVLthZUPHgLL1vCbfbjtuNRcpA4CDIXu1IbJ/aAlo1GXjhwzf8tYa7soAHfk4ZrQE5VVSNvxgKdiNq3KmFYXgHttqmkt8F0ExWdzh+FAnFzsUYXNaowlkSTLouc4NqcYLjDmXfMz59vHGiuifNMCIEaLUA9tpRzRlv34GZH9hFhlJ07v6Akx02aCwCwmfLGnB9XpBKRUoZ6aBCfxJgrNF9SEOMfj5AdPAWXAIhtOsasWjQITpY3BXKk67+fKnoJEpt1HFkBhc+Bt+1hiaA9YM840+nj0ZG4LDsqlUZiYX7vWiEe8XoYIrF3ipe94Rd92DBH3DW
IYSAVDu2UvG6H2QulqKHkJoHW3bBNOKV87pZQ0PWqBZWOiHnXLHdD3x6ueHjxzd8+eXLDOizzysEguADvA/IpeN+VHUNSWhi4G1DsG7S4/v8nM7DdpCFdgPs24FjO1BTQbf0jluiQ18D7kuALRVJxfEjNHBYTpGUQIi5V0yD3967PtdG34Ogmz5REEB3xJbGwk1JLct+ILeKhoaj0Jk8lYxcC7yjRODQgpD1OnsrCM7g1imEzyXDimDPhenJgRCzM3XutxkLeO7poMcTySx2ZnEpz2aat+ZceA9qg0ZtpweegOviKcQ3JG88LwGLtbCT1DXqU2dSeaNg/yk4LN1iiUBWaPYIbqYSBJ3En9eojEMwsqVtU3uZC1nCTSdJo4jB1Ic/fviPFfd3eH2ji9QgRYz5VQa/tZ/i3VYJ5Ygh0276musnP6z6jR2dzRn/0aSd2hEZrDb954NOLgIDfojdGcTnFZenBe8/XPH0RE8x561GakCX4pjq8bF4z/plwD1KqY37JjMYwEphNR2xczcTFX5y1kydQu+dUda14thIc+2t6QQg2vW1edjrbH92VuPaGEaW47PfnX+6bhHV2HNCDKNIeUe6vOFvUnLBBjDeQWG+JbhJ5R6sXqM7EIAamknbburtp82CVW1Tb23Szk99TIfzBjFSYnC5RE41c18icMaSbVUbjLVqD9NUN8JoieGobgRAYxEHAJsM4PRHDU2Uwkp6Jcnt7G1qfLpeT6O7OqN70q6Le2lAR533Q3/4nlYYhTLe++jM90S8P5QGZwti48Fy1I6jFKTK5sGMzw9KU7dDp6K9mjx81gqrUvs3vAIrjp2yhH1PJBMZRoPQbYGT88gUauD+06q3XIeZ+iSI/vM+HA80h0koRuV0X04CkUDzkugy0dUGikihft5mTN3mTBEGJuzUoRrG+XkMc98+DX47wHNBIWXf7DRWHUjBkAnYYmifpjKH08WGu047prneUDN3SMYKtlxgjEHQAkflCZ/z+jAmiYzIE8KAXkXE47Nylo7q+eFnD/LImFZp/ErNYFS9pxGZv3/VSa031bwpEZx5cYRZS2/wravfJZ9Kpzu1qKYHtXbUltFz0bick6kMJXaM5mHAsEPPasf52s3XOue/0UWqN3YDAMu1Vfp3AxgHL4zNAFTr4U7RIgAWKaMOCpEOBtfLgsvloCpfgKhJskFdFcQ6dGNQR1sg1DCtamz79MUzLs8r3n3nGU/vLwgaITB/5nzz0KeVcfA8FDLQgNyBu055T4uDFYNLCPhOXJAgeBYLtwS4GPDh+Yrny0r9RRe01HDUjOMoeH3baEgK4LoGmkMGT9iv9fm72+ExqAdrUcEpXbgF1jdEALBGNUMO759XiOHydomOB5hlCqk1BknjNO57wtv90D0hYYg10CFj8Q6X6PSmtfOypFz5+eUCExxKbxQlHxlHcLq7o+/f6VLhlfFk8e5ppTGmMUh7xn47qN3qnT5rypQaQlOowSwaqbtG75+sEPKYsKNaxiwaIjioysE5XJaIp5W029YaYqBTA/do9FK0XqMkhgapnF1t72VCscNJuhcm/6KxaKRS8PG+4W0/8HrfEZyHsw7XlZ59pQNvKZONppqWrpMRRHd+9mRjTW2LHtJDIdo1M+rYE7b7gX07kA5meQ3XkKY39J4r7qngZT80TkNQuiB4j7WffpICCp9T7woBUbPnjEF1XLqXPaFsO4o1wBJJ6lgilssK1IbjvmMk07rrAn9d4C8LbKRBbjuyBvKx8RuNWO5Abmwghui89WEtxrvOCtcHQSyW6JCK7h/NaQpQGy2e6tZxP8ycogQsGM6Q5JJyx14q7imhCfDpyGjGwoXGjDP9XsYQhuxakKNKEJxrjOUJhJ0JC3fcN4NSBK1VpAIIOo6Uec9Gj8U5RAcs7jQD6I1aL7JOCa9yd9+YrizUOwXP3eiTs8ou7JPwMGQ8ImqR1Bo+3TciKRouSUd6TQm2hs+I7vyGIN07FtLLSlf68mC+/Nu9vtFFqhwZySeNOxAVAfLV3RmHMb/G8lFnzaGfcu7svo/rgnf3hMM5JGMQjSCoC4UYSyxdLXea4sFeHEzvE05yzsyF+Gm6ekIswEmNH7RYETDPqrAzytWhmI7edMlvRMW8FhfvYWKAi4x4Xrxl4F2hqHhPGfuesd0Odf4W/jsi9CDrSqLQTp+dqAGE/lypVLpFp0xNhrNYlV56xjAQnx5hia0rzddwoq2VHV9tjGkolaahzgh2LYopeAgWeOcQ/XDv6NPHLPcOOKFDte6mrDPYD0Zr51ThHD3dnp66/jWnNIke5RKRjKHxaa3qLKKRHwoJ1UY3+1oqUs66bAdSstONoYMd+6Lu0LU2ap7AnU/wTE1NWtgbOtYYcFk9llWjSQJjZIYTeW9tHvQjop1JpuUsVMNtondoqz01Vl2E1jZ6mOjKcd7jY7dHOEsJGypSna353P+NgeoBetLp2oCTkLd2fj9jrTImBwpQkEqGdJIFUq0QY+FbRxfuQYw2kBNq0knRGjn3ZKWhHgXVZ06vTSM6vIUbuj9r2VS+uyJcFvg1kvQkg7zQJirRq+50SmMum/A6kmEpU1MlIrBO1NkBbMSSm+atwTOn2avIX8CflbJqgJSS3fupjwOUPt5POzM4i1Y5he+lcKpoDfB0obgsYZq/Pi0U8y7eKhuVOWMjwmXINO4HE4DX6GeaM1MaWDjpHUjC0tgBlqJZYL0iGE6nrjN6pBM0mJ87DNcpYwo7SkbKFa/bjlQpgJ7JxvrseWchjhR9VwtKJbrgLZva6zVijQHpawqlvtFFKu8ZySZGRiuUMuG/wWZrXJaaWaQeioZgOmbH6HG9LHRf2AtSSDishesdHpii4dI6ctdsGJCd1QRwzfPB02W/9yd0yNfnXcMJwajdjgFa47Kx1YZcHIoBevc6ujtc1xVwDnVdNAKDmHP0jm4DqaAJw8e2PeP+thNuMQZ14fep5mSSDXeFcfhU/f32UvG2J9w2Bq9ZZ2l5BCB4zxvScTHrFatuqvIfMNehUFxr9K1LWgQEgDOccI8Y4KzBGlQbJnwIU6F+aUtcWMeYAJ3EjDW47Xkyh2L0WDSAcFmo/xhwTb9EZGvhgKmXGZHftjYYU1AKcBwdqWTc7hvhCoA7Nf2sWuc1WtcFT5fILlQbktEBXy+Bh5KSWJbosSwO6zVimQa76tANoIkBssyJumbazqQjo43F/4A6IXOCYENmdephASWEpXu8Pm7vs6CQhWXngTwoXp9B2QM663TEH/CvNUMz5uYUNlxBRm4R9VIM27SGi3FjlHmqk5SZM4ve/yInrVmhJqmVPoi+oKQ8IWPvHVoMCNcVRqfR5f0TSU+XeOoNGxf2eRT71oDC6I8mjJtgQeL1IwHIwhoAKrSVbhCiQ8wkysQQkHOjRs0QCRi2TkfWZimXcwLXnaWMjxYdVejnB+/QhP/tLWeFmCsuCts/uTjFzpfg6Y7vLfIQTglQQRuvXDqSALftgAC4LkFXCoIjDQJDnSSIqgbIx4NZgUXD6ki6iI3sXuaVKfAUiE6IPv+1NhVyJ3x136Z
Dxxo8GgIuQkJViJ47bQBxuo50EMhxeLosJGKVr7eY+kYXqdvrHWiM8B52/sYqW8sIerccOQFU4RTQbIMVqw8z6bcheFwukQ7TvcOUhnQE7G5X2IXTR1IH9b0UEgY6M23W6NEM0KXjXSloI+juYYpj1zreuSrxlYIcPYV1Ih2tV5SaUVpA63Za1xhDGrHxFmYJhDj0RrYdqEdGSZxojj2hpArkCrEnfNWEgs+xDB2OBro4ACrDBPfC1NsXLQZG6GlWKjOrevRwJtB13TNTqTXun0qh2HN0yeSKEK7aU5r6GQg1GjEyiM95eorVTq+wty3hZdth3jiZvrzcOBUYwaGx2sZZXK8rnp46np7WyfwbmL83AnGCHi16sOfOQlNmsQG1N9zTgbf7ji9fbkiJmTiT49LBnZIxeLqu+JAvFCSGgCV6WGuxLDz4ljXyfXnq6mL0uD4t8MEyRsIzdmQkDtdckFNC3jPebhv2+4HXl9vML1qDI5wYHJ5lRYgeBR3v9oTn+65TlsCIVUcIEl0GDX1AL8MvrfWuGpgOtcGfU7RR7UxvPDisHshP60JpgZgpoYjBUyvV2iRFtFbn9FVrnUQYptVSLzWasrECdYYIwVOgK4KDoOWMclgc9wNef551DvEiMMHDqlD98u6qAnmHkrJmxTUND83Ytp3FqjYUhRmhzdjlsmgmGN0hnLV06tdCMyCsD++unKAM/SSt0J+wKly3pUQIsJKo1RuXDaMQcweq1lprxOV5RSseOWV8vN04iRwF67rAGcHTZcFoPIabvLUyGxAfHHwluzClzATw2x25VSUsORhjcdvzFKuPswag5LC0jlLJ0oyG8SoVINmmN+zHAYBF6vn5Cd57eGOx54K3lPHrr2942xK+ut15/1jBel3gVo/Lk65LVrJhRckordFk1gDw3mJdI0IMONL+tc75b3SR2rYD1nj42uFcm7oo9I5auPispc6lZLMNBgyH68rZHgJO52hWWT3JAKZ1NM8AtCqYnfuRK7ZSZrKmtYx7CIeH83bGOAxK8ViIs1J83jlwUaruAZ7uyKUIapXp8TfIGWOnJUI6tdUu3gohn1arRoMTOhpaG9NlRmA3tXaZXnqNp/YjAaUpcYE0fP7OAlrH+MTfz+vkNE1YVbzbjUGV4aY9rHDk4SHpU79TWps/Yxi9QhR+K5Vu8LoHsXqwG12SFy36w0anZI8R0S46dRho7dVJ4BEuYwS8IOUMY41q45q6T2QNEzw7fxFNLg4ei1oP8fbRWBGdRCwIndrgEKJDiIT7xnRNXo+SI4arhhp3Hge1QrftoMec7gqM00MUZAU+l1UnI4Ocm2qSwCV4acrIPO8vwTD75PUu2lz1B06zTDgak4E17s0lOIXKVTxvDE1I+9Ainp/vuF7jr0TG82XgnRYrtfHpABz071k775Ph21gySURwek2NQLyD0wbNLWHu1ib9vDfVHxaaJKdMm6DWcTRMjaQYRrBDBL4rbOss3ToMd4+lVKxLoJOENhUD0aitobTh1kEgszTVZo3fe0xtxpyJ18Ex0UD3X7VVlH5q2oK6lHfItJRqA+k1RoNJPZx3yLWgFeAoFV6dU1pnUzhgZzpn9Em86rNJmQfQJI+kWtFrweu26+8g8DGSGamSnJF3dzuYZO68gzecbH1gJM9yCVgW6k9FpQFNYVBBn/EqPnp0+/tgJ/Xr3/uEfa+4XugEcG3LzFKqtcF6whLWO9RAjNs6i97shJZGoGEMDjU4tOARnEFTkXCuFXvO2PYR8ZypndBx2hhB8BZV6arP336CjQ7XPcEFC0iHU6jEjFFqLH/Vf+3pumLfDrx/9wTvD+xHxvW6Yo0eIaqjtAiOUsjWKQWmOS5sayPtNlPYSS4GPfgWZ2YsQs91UnNnryfnbkxPdB7iOnENJhA6yQyHydj3BAtg0etsLaPyIA2PbhGD1eWVFWQH5V8fui4GtQtS7Ui1I5fGVNOqZIs94W07JrPqdt+nNKAbpoheLguCdyhLOGGuMUkrlAt0uCKwzkMc9xl01i6ovaH0Big0kRuNaVPmNDyukXMOHoKlAwWEjmAtjFLzxyltA0PiwuJ1f2ZncYIWpyEArrlwp7rROPXTpxvebhu+9+ULorX0f/MW3gYE5xADD8PgPY7EwL7toFfbi4Z2dmR06TDD+Fchmrsy86KG2DnvUBX65OelGNPYF4EatBAs3j9fcF0rynNTIbUgZdoq3W53kkNm4TotnKy1c1cRncVTcHNx3nSar9oAid4ztVIAbkzGoX5xvlMEby2Lv1uCemEGva50sJjC8pKxHTu+ennFfT84VTQgNU7ezlmUkrEuEesS4b2B99xJDcZs7zRnTakiOIvVe9SdDZPTSTXXimsiPOf2AynXKWcwhokB3tKQ+RIcrro/FgA5UOJw650BmZo9NpqpDmDL3CudekGDd88X+EgD4k8vBq+yTeTjfhSEBjhLSK4UQnvj+Q5mwPqAM8zLCoGGycYZ3O8b9n3H/+fLL4kwGRpaF3R05/C2H/h03/Dl6yve9gNvR8aTWRGWgHfvLvji/RP+wA98C0+XgGXxiEs4iSHKQmyVWWhsLj3s/vuA3fe9TzfkQkx+rVSDO11ktqaMltpgfYVL3ElYb+CLnzTWPqYOnNh87Tw0b0fC20FX8G1PyKXhUPZU0SWtCJBqgbtxN/V62+DXgHd7QlgDMW/b0Y3ufzC6c8yHeV0Cnq4rvv3hHa5LwpEKvrgsuDgyyUx9SH5FBjLtbYyzAPr0QRsGt9OrUCcKAVOJ62h5tXM27vTkG3spHzyCz8pm5IPZdYoZ6vJhsVQhs3UeO6kBbxqcqZ3BW5TusLYws47c0LiNuqbdfR2OCQ9TaEdHamPLSqhqRDUMyjJZVpjyAGsoUkXvKEaV9rpjUUCL0+twOQ9k7uWqMEW3c6/jQ4B3Dk/XFdfLgmUJM1bEPBR5Zx/c+JVOXmcdOJNmixI1RvBjShnbfuC+HbhtO5pz6N6RRq3XkjsORkw0a7E4h1IYnjemCe7ENPNJp9PaGjbhM7IcGSFlBN2lDOPPcZ3HhI2HYuM1AHCIT8e+QsDoE4uTMg0AViyi84iOYYFR/ekW9Tz0OgmTIMMGpMlJICq1wlVab9nGXasqNz67d4ky8J2YbGkLJUBuDffjwKf7DW/3Hbctcx8lFr7TTmvbNeSyFFgvqL3i+XmBkYWawOAQOnC5LpPAkQH9LADRXYq3Dt5VBO9JVui0AqMm0SqNnFZCTgDbu94jFtc1ojfKRcSopVPrMwV3kI0gghgdvNNsqMC9ltPU6nJkJlDbcwfuDPWVtnHXbYSN9IhmGZfxKTp4EThRA9tScEtpRnoQSenTq/HQOPqxixvOPkv0WNeA64WJ1UP4/1mR6h21nN6QYy3zdV7f8CL1hlpl+qitS0BvVrUthHRKqbCO8dS9sRtspc2OW3R3BJywR20NqZI88Lon3PeM/UgohYyeCZ/oeI/eOb4L8PK2IV4i9j1hzYWL8mA/g/rOxTaZhesSUS4F337/DmnlxPYuBkRjsFpDam5N7IwASMo0kHUW6DSz3VPhw9
+5aCbJYFYAugVAO2W9WR0s7WB0QrGDYq77FK/2/hWYgZATOtTCPuhgTWnSQ+NzssLo79Y6DSWpn9FOTXchY5ff6oi2HjDHmdKa+2Psg8BVixgDf+ZQ0M/reloo9donJCVQ12ydaL2jIDp6p7ovr0p/ShrGAxVCRPAOz08XTrhLnM4Ts7mR04hX9D3XXqFCqKmnaZXBiU3p5iUV5JSx78e0x5LYJ4PszENTjZox6Mai2QZvNUJCr9OcUJQhNpwnGjJq71j2hGUJuGTuGttjkXpoCB6vo7MWE5/TFmsImnk91SPQ2unWHj0F0aNI8cvBD4KOhlTmQg1TUViWtP42E41ra7D9FHh3fUjFYLo+iADiqPNijhip35/ud7y83XHfM8Q4OB+nt9+2HyR7pARxgtobPtyvNPztQadjoQl1LqS/N9Wv1RMGds5q/IZHykWZfn1Cot7Qv9GLUIPU6MJurMV1WRiGmSpGisMwYa29T+eQLpwAPWjZtQjgIhs8FxzuLztMJ7GLon7q/Tro4G61uYyeEDGNbnmtn4KHQYf0pu7lFfecaePUMeHhoufhUWgVlWpB1Th6aw1i1Gy+JaiFGC3JRJ/D3kSheMz9J5uMr3fOf6OL1Jef7ijVaAZKxxqoBQqKewNydtbOwL0pQ0k/ZKNLzeHvVTTWgNb5BW9Hxuuecd/ZPcyAOiMwhnRUelUx9qEJ8Om2Y70f2FMhpAJwST06mDl8nDZAS/DolwU/+O0PhO96x2ItPdZSwr1UHCK458w8H2PQDW/mbg1SbXjdEnclvXPcDg5Plwgr6vxw5CnQ5DURLjCDQ1xEF8kWz8/rhCKNEaQjIx8Zi1rrBDXpLaUgKVGj1DKL1CjGRokLcA49dkZNWF06d8w4jOsSsCq7qYBRFNFZlOBQqkcx1IXsqaLUgiNndDHwzWEtfFgm5Vrks/tjMj6tmQWCEC8/DyNq0vm0YkRrv0tZd40yobHgmX78fF1wUdgmiECaOk7rgU4+ghZWDLkDX30w9fSw662jlYJ8JOSDk9R+JBw5T3X/oBG31oAKstRa1SjvYZCrU32n3VCBOmZrw9LAglJ7R1JyRa7qumBJfBHn1LfunIxHQ3OkrK4ox9yz1tawbwktFdhOd5KLs7CGe90Pa8AaAt6v8f9L3r+92ramZ+Ho851b632MOddaVVaFvXeCeqOCGo0EA4IYESXkJiRXHlBQjEhErEDQeJUYsETR/yCa3EgkF+JFUMQbT6kIBnKhgYDggb1/lTJJrTnnGL231r7Tuy+e9/vamFZpVv028vutnQ6Dueaac47RD6197/s+73OYr+WiOUpN+D4cpeF2kHjgdPJ3lqLg4SMihkzDPpqYQWM3oFHz0HuVAChcvPeOpyPjzW3D2+cb9lKxpAUPIeg16BgMqJPPvTZc7jtc9KjCdcDj44UMQmX7Cj2cKAsobbIpl8BQz2NZAC0Ux5F1bzwE7Qa9NJS9MHHhYYXzDh99+ArLumC9rOilcYc64HBl/lIuwfdBVAsYgsMrf8H1suAz2wOe3tzRa4cbAmsAwdW5Px/lncBRB8TAW95vlzUCvaPVCrGUNMBQs+g1N26kd1MgPZimFiFQF7VGMvmCCGQvhNGtOTPEFEofTYZ1FiEF+BhwP7ZPdM5/qosUqcpNvzRe+wUhAAaqfdAPPRdW8N3OgzrFCOctYuCytCvVelB7B5On6/J4iCKddoXVUhg33K1bV8cEvbgMcEIUSk546VAAYHasawyTwurZSqOVQkZapwWLwMAcmUw2Q8PO3Dpum8ZTjIsajHXvmo6255PxM1lfSln1rcMHHvQheCwLD7icCw5vkRWySFpYjDHv6VH2XOZhSoW7neFuzg4WlyA6FqgOTKeMNWj8hR0OH0rNN+z2YA0azveOcOELWNOaCd0COKGr3mfnba19Hz40HUaYCWVV57SmiIdL5X5xCC0dmWVRow2uKWKJavkEUePeBmM4yct4ji+ex3gMGLQLJREiNEwd5Inx/slIO37x4JSkE6tOZe0F8WRMRTMBV5/HIJAMBt9Lk+WpeXpp3Ous8u37fC+PTFugp+eNtk2dWrOcM2opkN6UuUVPQW+gHpdmFqjoaMVkjMF4eTQ/FiXMyGk/Nu4LY86vAe05c5KJrJ1xImPiHp6BjNag88Yw2x1nwBBhDxd85ApjLW5bxn4QxRC9J6lr89MSrAGQJrBO4ISmubWTaFV7O+9tecGoE6iVEeNcWiOasy5xjPXI26HsQY1xAVlwsPQSHTEu3r4gYAhdJkxRe7DS5s+WDlTLe6k3nk+1tzmpWW3Q5h7XKJxr6MjinUdwnlFI9rSL4xTk4Bz1jjF4RE92L7owl87wORQ9twR40XSwKYornf/34zcAu++oDak2+mPlguf7wQ7dDsdhHnbDBqh0hRcg82K/Kmf/1eOFU4ehqM859fQbkBMGPGdwWdhZ905WjN33adQJT59ADDhC9xTvxdubAavw4CQDynH/JCQCojW0QlIBzUMLFexd5tgdtwPVMEH0KVeUzps/t45rpRWSJaUMb582ZGWvjbC0DoNrB5zzCIGOEUmNWi+XhCUFRjPsB5+XQPdjDdtW8HTf8Hzf8UYPMJGOa4xYYsAHD5c5PXkDdHvGzxtL4WKMDo9L0swdpbH3c/9F6x3yBIO3KM1MSr4LDmEJNG6N/rRhymRjuWoGEgsXlfpdyYDj3SnaDFg8rImaFO+QS5vRCIQJzYwof1wZqRGcJZOuM6tp3MBQd49xAA+obux55MUBZkS0CamaJ6YTE1Q/qTsGY4mJjMmmCq+JvTSSJ3LBnjNyHYJPNfJ0Dlb3oNSNMeU1hoA4UoATGVnpEhFShHXmZKWCXnHvnja8fXvDV37l7TRuTd7rjq2g5B2t7kA7YIyHEwNvOqIFVm81soTXGwB0c9rjDMZEl9m3DXxB905u+goOd3abAjWC2qTxe9IhYS91fh214mhVp2xoLDtp2nsVVKHo+ih0PH932/FKC1VX5qIPJMF0NUs22uyQzdlx7aKIAx0bjlxw847asVJ5L9aGpy0D0FTwJcEHj1fXBeua8HhteH77jKZ+fGqPr04NPLNi4M40ehZ8upvz3rk4j5or8v3gpNc6kvOojQ3pdmTk0vG0Zz1HKtaFk+41eYXwSa8PLuCSFize45oouI0xwHql68eANVFQzfthxWNK8B1ALtjePtM5pBTcb5uKvCuKCCqIqsQYcX1csawRpeZPdM5/qosUna/HAp4TBSMC+uwajXSNPi7YNI+lSlevLoeHS+Yuq1GAOZyuUwp4uC6KmwPOVzo3RIcHNR+FIQa+ZnZFPjh88MEVD48X6i6i14PGTCx9thezox0uxlYdikUXqozQuO+8cXbNexkODrl12CNPm5m9doh1MI6CUWOBeCfcICK47ZmTVG0IuvM5Ms0viy5CbbfKZtP3xoCRG5cIqOt3VfivbjvuhXY4H9/us5s/UsOl0IgzegfvO/Y6AvNkQqzOMIE4DEdwgUY3EKbIpRKaNIQIFkcSil8ifAxIKeKzn32NDz54xMPjSk+52rHfdqWq655EJ
7PhzzgdHISUWGsIPSI4iDA5t9SOptZLvWPu4Zo6ezgQGu5qH8UunY7SE5YacFsbtOiT4OGtTnHg+8tU1z53PF4NVgfV3Bg1UAZ0X1rxfBx42g5smZTgscuDvt5kTxib8gaHx0tiBlUMc5Id7ig+EF1oxsB5HuCld9y2He9uG94+3eh0IMAlRQAdkMaMs3LgdmyIjcUr1xW1ed1Nyoymh6G7RIoeqwCXXKlJ6gwAHB6URpsTO5uRyF9T0ILlJ3wunWSFXAi5k/VYUDuF+0wYdrNhnRlb1qILmZdjJzR5QGOAczSTDktHLBHGEdK1pcLWjgWAryy43hkcShI4iiILQl3SXiqbhL1gPwpidDN4Mujkaaxh8KijwNgGN8MOidy4M29NlNjiWbCqAUzrqMagmaYTkjB4Ugv4PRcVyJfJ2t3ziuSoVSMLMaBeViTncUkBa4pEmpaILIJDOh63FbEWNm+W8Se9NpTD4GiCkjNqzrg/3XCUguf9wL1W5Cboqkl9eLpgXRNqL5/onP9UF6noyczymmvCEC0zjTANgN64ID32A8/brlBAV6o6aaaXJSE4D3TAacZLinSgOAq7anvwJr6kwC4oBVhvUHrHkgvE0YX71Wsay6YlKIPNTeuZeRorREFbJju/jDUnMzETHti1SB258vDU+GiUhm4NQ9lEmOQZIjULhQfUzZ/EkO3goVv7CTVmLVqlVD2QRydv1UNM1fU1qp8cmWJN9x97a3g+Ct7uB6HSFzuLhyWhdY+g+4fSOopKABjjTmjI25dQjWiRqlPrETXdNISIkJj9FC8Jy5Lw0Wde4fXjFdfrws+6NmylzP1UTNpwLIGHlRNIHdY/ygYEDyw4QIIBeR8Ve1fRr3oaogtaCrSWMgal0m+xlrHjk5mlJTohM3qDabPHUSYVNwWnQk9zOtDLEHif5qJjmsJgoo5JqjZsuahehfZVg1wQAllVwdG70Ds7jYivS8KaSGrwzk022Pis595UP5MhB7htO57u2xlSZxkFb03HoXvCLR9orQIClFJQYwWxPTehJMPFE5LQKmjPAb5a9NZeGCVjQng8sC18oq1UWMJ0l6GhrHrxtZdWXtQMtTbQj0EYsLNoO0so33XCFjPDTFmUA6K3zpL4JAGlxGlnxcZHTQJUKzWarj1ndOmwxU5I8agdvvAzO3JBzp66K2s0wwmzuSEJxcy04AH+EuHRhkn34saQ6OQMIKUPXyxl01l4TUduir5speKWC0d1w3WJg0c0zIeSECDLgqS0+zVyenIpooigiOBh3eGzTpSWJtS1NBQhgsMda8Z+37EdB97e7nh3FGy10h0/BtzuO65rQpdP5ov0qS5S/8/f9CEeHx5YZKybgVu5q3C1dezbhvu24/l2x9PtTtsh6YiBlie1duS1UkMCzb5ZI9xqgc+8xrpE3LYLYTJDj6xHhcJC9IyRNsJQteDw4Wcecb2u+ODDB6xXOqAbZ+YBMO4Ca9mlxYXmqQYG7Sjnnueg8/R9JwV+P4r6uYnCCJUdUmX2jXEGyZGyW0vD1oUkg4Hzi04X2plba+YF3dqIsHdkRSq06TwnMR+cOiSo8WxrEG9RDXCg41BBbu8C3zpM7XiXK3ztsKZgy4QaWu+k0i6L2kAJxZ2G2pOn7cDzRvcH5hlVrImY/wevH3hYpYiHD65Y1oSPPnqke7MBbm/vJCDcNj2MLB4eV6QYcHlYNPsGlBvoQTQo9d6p0HHutejPSAZYnQLTJTi06lCdw75nhVTahE+XJOfeQxlqT7cN+5Hx9umGWskAe1wSkne4RiYL51wp9zGMXFiUDm/tyJ3CoJ6i9BHJkTlJHXx/rRak4CJSDHhcFzZx3mFVuJLeg8wCUqRt5mJRhmFnhEstdEPYMxukXGhiy8bKwTrAGEF3Hs24aeCKSsPZ6CuO0uFtR/enHjEETkIxEeYulcmv0Vvu+mZBHtMUk4JDCghrmLRzYJB2Go7M++TdbcfT/cD9qKC7D/cr3gYE4zi588KDgAVfBDPFIOiqwA3Sg64EghbJmiv2JSLfD6IJ9wOtNnhXOZUDiN4hV4U2dZrK+neOSvgtOIvntzcEx8muHXm6v4i1QLMIiXAnrFU2sWgWV0MtmHZqztK2KC4BgIBZkeqAX0ENmR2xIOqHMSZVbYhicHi1LKjO4Wr9DKm8pMRGL0Ut2B57puD87dMdx37g41LgRGiNtCZA9YUdA4IteNo2PB8H9kqvzefbDdcUwVb31398qovU6+uCx+uKFNjh7rkqdVkm+2mMuPc9435k1FonTVcEWHNF8Iwgr1rYRke7RI/W0ns6gyUGXLVIxSUAzkAcc39ccHj16oJljUgjidedN97Lx+mtZlX0qVRUc3aIw/NqOjUoqcIYzNRT7pVV06I3mgFhoaJwFcAdBZeihNjOTvp0sxiEg94NeqeP2cswmOnvNqCYoc1wTqGuPg1AxaolipDKmhUaayJkWJWKEKomidLRI6tynp1wPSc7Q2ugZUlYrgtePV6xXBIergsnT735aSt0V9IFu+O2NIwsLufM2U2rRotv+RlD8vJjkokMjsmoozXCLbmqYLI0hvkZwHf6BBqD6SO367X37rajVE6z0joW72E0y2tkH5HCbSe1v7aK0gx8bzDmRTyEDD2ZTAeJEaviHQ+YNcX3aOCEEPmrVaNZUUZmryzCtmM6sI/MsaGP8ppiPJorvVThnIf1HsY6wFh0MSiNdOajckJKSmKZvnkqBkqR9+1wnx8My/EaReFTo5TlkcM1rtWBOoyGgIxGTAaatec176zmiQGENMFVANSRZWgKxwbQ6Gs1FkzVtXzuotAvAEip+lz6KanQ7wNgFpdBvKKAmdP5cRQ0SwZsm4awAlGvQdv8nCoHqWS4vIwGw7gXzhYK0Xfhbtn0PgvRaKJq74jdIyWPlDRpOnBqR/DwAFxjo+UtizonewMvJwHGAkxo1kVitBatNQ3VZPNhxj5VeeZ0kW8wVVBLVl/S3wBF6v/1mdd4uFwBTQM1JgOmMrSrks66FeLUTzu7dBYpMtNq7VjTguAccj4LFZ0pLNYQ4Ayplk0X+lFJBTF5LNdEBlB08EuACw7rhf9vRHHjhLv50LtspgSr56C0U9E/7gNuTZS4oQWK+iPuVzyooYDVuOg1wXuPApJK7rkoe8rgunKBvSRGogfvsGrshlMh5hTpmiH6a1P8N9wSxhQYAvdCl2XBZc3TefmyLFhjQFgSDEABsjEoEOy1wAlFl2smXHrkOLvNm4pZt+NQLzjRbCGDJRBmffX6AR985hXiwoX/7bZh2yp+9c0Tnt7d8earz+yYnUXOFWsKyHtBjIS8loUyBedGoitgKztPY9osYiP3xtkzdl46F+4NdTLBjtrhu0MUIASNjQGdqo9c8PZ5w7vbhl/++Am5ZLTa8LxEXGLAcbkoiQI6SVkkB2VHVTzfNtTKgzCmeO5hBNODcVhhiSGLL6WI67rgw9cXLJ46HT8IQY4Q98izaq2j7GUGO1pbuYPcGBQptSOFgMuS8Hi98Lq1Fo+XhYcnBOtyQakdMe3Tw2erHfaoeLPtgOF0
sSoRIsQAB3rGjRj64e1G5uIwQ23zcMcgU+gNNK9Tne7H3k+fIKx1iCHAwmKNEYsPWLzDYrl/QafuDp2ogemd95Jwt8OiBXhN9CW0GFELJwFnSJeXXHSl0BSyP5sdCOZraI1axgGhHqXi3dNGhpyzGCE1tXUK9b1DMwzKdLGj9dOWzRggFo8UHUTtiIyhhGRMn1XRFVuZJLz0jofrQqJMrXj9sOJhSXj96oLFOiwwiAJ059BU+2eN0Vw4Xv/BGFRrYEXQW8XzfSN0WSu248Cry4oQPF6ppMTHgNA7YowI4YCvHr5VOCOwGhOC/11F6l/9q3+Fv/N3/g5+/ud/Hl/+8pfxj//xP8b3fM/3zD//H7Uq4/G3//bfxg/90A8BAH7zb/7N+K//9b++9+df/OIX8df+2l/7hp7L4xpwXcIpXoxuTgUKf+N+DzPBdmDPXSPX6RzRpg0+AEBvAAhHVhYz0jeNszDRTKw6pkBna2WZWWWcDV3WnKDmt1Zacj/TcIdmZggpBTI7sGFbMyamseiPzlLX4hy6MghDICRhrMNzqfPfiu4CvCZ9rkvAdUmIweGy8L2JQQko/ny+pEqPTvOkbxvQMmeJZCh11WZxGuBSfYn8GeOw9tFiOwKeHLvWl12nAchyaw21kixRB8NMSLrwuqNqnXDGEM222rDvGU/PO97ddjzfDzzvWXF+7iH2GFArtWNLVJgLETHK7ERtcNxNtQ7vG0LjHod7oxH4RmHyoPFOASxOLgyUMj8C+FoT5NKQCwkhuXLv5lRDxx0oPQ7HdzEQ3Pcd+wHsOeOSEkrreCXQCeZ0JfdKS672hQ2ROmisa9R8IU/fNGOmr+BooKwlm68cNBw1hu/psWX0zMnqcUlwAlgxUwi+LHHugqIPTOj1YV7Tm+7InuKO6LnH7QanLkvfsLFzGbCbEUFuHWaIeevw8as6vXPfxwLQp0cmD1SH62XBq+uBDx6vPFRLxeo8rpFZS1OMLjohahMI0WYq04W+HoWfeaD1GP35+PdC9KjRo5VK27Xeld6v544AIyNuSAXauL5bw/0oGg9TGF3hHRZPV49SmIHXQQcdFwLCEtUUFqidqb4pONQasEQl2ygaY72FMx6+cOr1tSI1koE+Y6A2YILH64pLDHj1sCKIgVfz6GFAPSdJvT+lNbRWVRtJNul933E/Dhy5AOjo0vDhccUSHRYX4JeEBOC6LNx9GUPHdWOwqgvJEAT/eo9vuEjdbjd867d+K/7sn/2z+N7v/d6v+fMvf/nL7/3+n/7Tf4o/9+f+HL7v+77vvf//N/7G38Cf//N/fv7+8fHxG30qWCLtVqradyRnJ+1Y0GGMIEUym4L6f9VmXxSCMYa/+KZC5wMZgknNX4HqcsgSNXPZ7INX3JY3Ppl1Lyw/zPy2hArkdCAfHeBI2Dw1PjIv7nEQjtJvAQQLaot8gAlUd/tAbzoxQO6Cw768AE53ixRYqJIeZH68N2oeysmJb0gfeBfHOgyvQzphM+hv2u8obLfEiOSpQBcRlNZgLGnQrbfZAIwObR5Y7YWdzwu4p1Qe8EV3iQOaIzOOsMl9O3DfC0kEpcGB63pvrTLSNOm49ulc3rvAEYPiTS5MXvXOonu6ZJAFNtw2CKW1fsaWn2+vUQz21O6IEkyK2sqMoD+yvZoWKdKZnWrQSEZgZk+TjrtOa855pBh5XRm+rqhFyTkL1+0kSQTPQpVSnNZE0mQWUR/cTIq2xvJ9VxNVQNmce4HUBiuCSwwTBpvQsadlDqTATxjZK9OOJAbAYMsZe4nIvb1330AnpqHnaxj0fLLhXD8LUFNnDmstmh9gnEJjak1iFUlYl4jrhSsAK4KWKxLsnCjti2aR/CUNPRR93ZmM2pormndAC3Sz0OcOzZ5zwU5vxqbsTijxYkx6onKXAUmOrKujUFR/AIjBogQHiYzRyeoaU6XT5SU0xNZROpOXq6IKLbjp+uG9QxBMh3oYNgK+M6EhBsJ+Jqio1gAPl4QlBFzWBNcEpjSYSjahURIPz0+eV8MtpY09banYc8Z2HNjzweQYZ3DPB7IsEGvgQ4B0ugCVmuAAJM/7aFVPx9b+N7H7vuu7vgvf9V3f9T/982/6pm967/f/5J/8E3znd34nfutv/a3v/f/Hx8ev+bv/s8dxHDjUQh4A3r17BwBY1gUpRtgjw0rDxVGLswSLa3I4aoTpDdGR8kuWW8bwq/OWVv/euwkHjSV6b7Qauu1cHDeh3VADEBYPnzyGhQyAySh7z/ng5VCpB32vnbqLo6LmYTJakPeMY+f+4nbnZLDvGc/3HfuRp+mptQYBwINzuCSPtCyEHFMgrCbAvXbkqtEHo0M3gHek/z5cF6yXhMdXF2LZ+pp5c2Fi3pzGTlGq0dfpncNlTWQ7rgmvHi5TFzRYXF2Fn7YBzi64LgFr9BOiebUsWILXxmIE4DkNLozTdDN3gVct3CBn9NpQAdxzxtO7O969vfEz6oDzZE613vFuz4iloVRlMpZKV29RsSQADz9hVoATkHeW710fnfr5WdbakEEWnVge8iHQ9TwpjBoMpQmh+lkUfPAqyDVovaLVguPYp33UYwwz2uR27Dhag8DiIdN7zus0kpaENXq8vi7YjswidXBKXlLE5ZJwuS64Xhe1J1J/Q4WCfKTaP62c+I0x6FXQ5CSIlINwH1rHJXos3uEhRrqMC1Bap9FyLmeToY1KqRXNNLTecMsBlxJx1IYGgaj+kBNZPx0lxviuRWvAfrUwXyrfMwut9Ck6HtlxxlhcVrpWfP4zr9ilG4PnNwvKXtCPomkAov56oKZM9ylj4t22jNttx/MScX/YgN4RvMUQcNnAo9IF5iVBBHmLENAk1liGKbYXDWZv6upfqr5Cg/3Ik5K/poCHlbCzM2YaEhyl4agC5zPiXpCb4Gjc61pjsEaPfC0oawKEIY2LwdyBh0D6f1KWb2wNqyFbEBZYUpy6MVjtPCqJY6arO38DzQMMGceH5obVMRX2htIqcil43gFY4M1tw6vrFY8X4DFGkpVaQ7QGZQlorcEpEuStxVH/b0BB/8pXvoKf+ZmfwU/+5E9+zZ/9rb/1t/BjP/Zj+JZv+Rb8iT/xJ/CFL3wB3n/9p/PFL34RP/qjP/o1/9/5059uLNAFJBUEQ8bbNQbUFHGsC/adBYodOSGnEQs/gvKc4+jeuzK8SsOhHU4TQSoBpWin94LQMAxST+znxNDH4yVs1tRpgIy5StbcMBvNBbcjYz8y7plWI0U7W+nEzj0Yxpjs8Iw7NRTTeVxfCPU6bboScBdmp87MeUd4ogla6e9NdINVdHI/RMWRHgB3at67+XeJw7dJmS/MuQYApBDoO2c7UnCT5GHnpMedSfD0ThO0WfBFMGHRWjlVHVvGsWfse0GrWmB1qhHQWaCa0yCzFMuU3+yQj8JdnE5SxPUdetOOu3X0Zginjk9SRI07KeIcN32InF44sXPH4FR/RF9AMvYGXFsLD7DSGBRoAERrEBzfjQFfdxD+OmpHLg2ldiQh/LSGgOuaJpElaMTM6Z92XtfGDLYcJ4Cgn/mUaQxiyJAQTLh
VIR/L3WcXO/+u1U7m5aTA+4HQee/mhLp6V6eRE9J+CZUC2uRBLce0gxedlnrt6L7RlxOY1684wDpBVE3WZU3opSFfDshOGcEx9GyjeAitodpgZQzUQNGNqvB+U/IIEQ+B9SeK4IJDaAyxbNUrOeqUCpz3voZS6hSUawMs0MWqo4SFcfTgtMbA5KrXLp+PacLzpzbsldH1xgDSGZfjDM1djTHwldrPMfUN2zdrSUfv0iC9aT4c0LtDtBZOYVYZjZgxyppUo2cA4gcppatLy4Dr+bq7FvohHamtwySKgGMMQKvwlrChNTyfaIX1te4qX+/xv7VI/eRP/iQeHx+/Bhb8y3/5L+Pbvu3b8NFHH+Fnf/Zn8cM//MP48pe/jL/39/7e1/0+P/zDP4wf/MEfnL9/9+4dvvmbv5n4trXs4ErFse1k9qhgNMDgwyXBC+EfALgdB3xghowxBo/qbE11td7UwDwMjyPjvmXkzpvBR4/rUZEPwoBWL2RpPMBEaBtzXv588L6TKYpteWDgFXkvODb+nOf7gXe3HV992khBv+/opQClIAlth9A7zSp7RwIFk1Z3DgCU0cU9CkqfGHjOtH4SCKwziMkjJI8QPfH/1nBIme7xpTSFIl9mM6kGKQaESEhv+LG11vF833Dfd7y93bEfBduWsUQyiR7XFQ402gwYcB8PPKekjgaKAo9SYU0B6whfF9lRDXnPEABPT3c8P224Pe+ouaI33R9YQmkdhI9yawjVwllmkBkRRG1uehcSXbyDj7qnqQ0OKmN4kXHd9FzrAiwwCPozfNQ9UPK0cgKQqkdLHY/XBcaAEMlhaTO0dRy54d4qWq1AFwQDdO/hTICoeLfDoIrBXju23BBCxfUCBGfxuEZ0XLGXBddSJhrw4asrLhe6GXhv1XVbY2mUTh1iOCNGhCyrru9vb2exGfCqgYF1Y8rh52yN4XWsu8Sqe4thANzHYdc7Rfa6DxnXyvQk1M+fXyPWZTjJD2lEhatmuvF7T2HveP7OckfXS0MwAHIF9gLXOo7bjtI69qPQcUaJz10M+iAmyWBunj+vFVLOrXewvkGimyy6mDihpDVSdhG5jzZqKjDkAoPZ1kRA9kMFnEE0lhqkJWG9rkhrhIMmFXSBdzLPi1IpA7gdBbedDg272iJBOmKkH6YNDsafmkvX2ZAMv9G6F+yl4H5s8J7OG3jVEaxDMlYhPpmf2VHocNNEAO8Ba+ckTPlOwlEbslLOW5cpldlLhbEXGt6uC4IFevGARtSPZAb836FI/f2///fxJ//kn8SyLO/9/5cF53f/7t+NGCP+wl/4C/jiF7+IlNLXfJ+U0tf9/01oFPn2vuH2vOHtmycu9o3FdU0IGh1+gQApAK8u2ErCZUkYANYrdbVeIg0ojU5FXW+QWrvGGghqs5w2dJlbD0ZAV6Wfu+7eq0yTvS0TNZgPGowSby+Zib/0SePF+LxzktpLgdUpahk3M5TU0PsUiOpAR1hPF/3e0RKq1oLnjYvn9Tng8dWKED1KbWQgQrsvIcW3dRaobc/qiED3Zm8NUqTy3UWN+HAOwRFWrK3jVnbUrePNfcPttuPptuNhXXBJCUuM1L14Bzv2XODEG13A1VjY2NCdI+a9O1gIM2+MQSsN2+1Ar7yZ3jyzSO1bVnIL5s4G0ahTNyn21NMInu+7/l1S9NfS4KIDNPHVRwPxDlIbqmH3Nz47K6TyBxjA9unU7jU2ZARQNm1c0GiWixQhjxccySOXirdGcDOC+83MEMoz1yzChYgqwNG5c4AxZGseqrPRie1xjbgsgoeWpoPKJUYk52CUoCPm9Dh03p+yCG32X8ZzAJgd9YiqF3VxH3A5+ph6eGEPmJj/lv/YGnXJUPi0KoHkyBV3jbzJteG2Z9pulYqghBCjezkLzH3o+By9smZ94hQzJhYDEmCWS0QrDetKXU8HsFW1SqqNBI1xrdtRwLnbu1wIgS9rQtQifspHdOekE4qIgxfGq/vSJpwbAiHfVCLWJaEbB6fie+donXRdaATw0esrHi4LXj9ccAkOpnfIUaa7iVGyRgV1T0VlFqKN2pAqhBhQusAGWkWZ6eKijiu+Ab3hlg883W74lTdvOXl5j5wrlhBxCRGudZgmQKH5wbYfeHu/o/YOn5Ky9AJSiLguglfXK5sp61BV/0QUAChFk7OthV8iujOQGgBl7JrGAuv/r6ag/+t//a/xS7/0S/hH/+gf/bp/9/f//t+PWiv+y3/5L/htv+23feKfwZG0MlLjvuHN852COusQIDCeBogBwMVZ9CVhiR1e9xICg8ua6Gnm6QLAXcy5tH2pVRrkhhH/XUuD823m/sjLSvTiv4dn2zwIXhIk2sCA+7R3OXTEP9ROyAtv1CEUJarAIjW6UUy2Gf98wCbSNehQeLjd94MCTXVCGNTdAZONp15bQ84s0r02NGfQdfFpvED6adLroudL6w1wTOfdcsbzfuDpvsNoKl1tHcGeQmKMQ0NdPhbrYIKg6oFttEh4JVi02liQtOO83XbsG+HQsTcbvnBmYFz6nvfekaVjO7hrMhgHAXA51lmsrXMwjhZIvTd2Bf00FzaWTDev2b1GDzvv7Gxwphdf6/AGEG/xsEQkZ1FCQT12tFpeZFvxEB4TQTAOAgPXRA9IJkQf2lUDguACC6AxWAZpw1LL4nSPIGpKP81B/elswg96wNQvUGqMA5kOHRjTLgjXGYylJWb3PT7HcR3NLxiInPARzYgrC1ap2FS8XmvjFPByhwsWfMKh57TtlDk7IFo+Dx7YMQaUWBCCejVCpsPM0ZomEhiKdYflkX4tqybKJkZ1eKWeD+X3dMGwlp6ITnSf6ybkzX0q4d0UI6oYEhKE1kZDv3ZdIh4vKx7WRN9IyykxOofmOppzMzJ+Eq20UI3JKB50FFn3AhiLlAtiifChTxPolzBkafR6fPN8g2izlULCGitqbPC6RnCtsUjljOd9p7YK0Aw4r7usgOu6oggNo/ec6XsKc5o7s0pxx28M0xAaUQNRKyzb/i92nPjxH/9x/L7f9/vwrd/6rb/u3/2FX/gFWGvxuc997hv6GW/uO/LR8f/+1Y/x5s0Tnt8+IzmHhxSxWCjzjcr9ZC1CihBr8Rnvp1O0taevl9SGrZAeXFubS/Po7Iy59gC77IPW+8yosgjJw03WjUDDa3gIjDDAJmi5oZczW2iy+gYMImfwXxtFx1GNfwkWF2OQHA+32jr20thv2YLmHZqle8NwlGaKaEXJ3HE4b/H66QofHPa9zN3UgAqNsxTiWlo+DUcAig65WG2etihxIfstuKix2MTr4RxtUrrgqCSgWOuwHTTRdDDUo0AQNHwwrQuWGNCNxXVnJ3e7bTjuuzYFHc9POycpIYQyirh0eo95Rxgl6OdVFLLdNAusqUbFW4N03/E6F1wvCT4GPEpnntBYZAeSPACmEg8WI/QAnsJNLaZGBEWd5m93RsHve0GtFdYAi7NYXYB0h7rv6LViDYF7EDTKA0LC63VFiFGlBDxgdz3cqUXrWKPH4xrx+EBHjTWGWWRabmilE/qKhHLXx1WjJ9I0O2
4K5Q7R7qCgW28RLO2HfNf8JJ1oxr5GckGDKMQ3whNZaIdZsreOAl+wSB2lwhwZuQNbLthyxfP9IPW5dSzKqEwaxDnWusZgevj5yHtsTIMAX7TVzCs/EI1AuLRBcC8FW1EISgvKmgKulxUP1xUP1wVJ5RSvHle8frXg8dWVRKolMG3Z0zVmmOJyD8jJ1UeHuHgsR4Q0wevHK8khxiBtZTqteMdU4o8eV1yXiM9eVyyJ+q2xN/ZGERDLxOrZQMiIku8ovaFm0t5rF4h1uBxF7ZPYnLiHlQa50SHaCPEGNgUgelRjkGuF1IZf/vgZwe5I1utKBFhAse2Rd7y539B6x6VWiOHUliJNCj4Hi2VdcL0ceL5vJHA4xrWI5sIBgI8JLkX2v43+lb2xqWzpkwVKfcNF6vn5Gf/pP/2n+fv//J//M37hF34BH330Eb7lW74FAHdGP/3TP42/+3f/7tf8+y996Uv4d//u3+E7v/M78fj4iC996Uv4whe+gD/1p/4UPvzww2/ouRytMTo5F9xzwd5IVRVLk0YfdQkPTk3OgLks3s7OExgMn66TmZwHklKEmU9FWMcbw6gFheqabxMSOwuOaov099SPEN4rR+W/08NhajbMiACxLyxPLMRa6imswRItkjEYvMIqXTOwDMTQV0ysZYcDwSBNEUHoc4E7FfCdz3toTTCmNW81nkDDCqtV/OX0pZswj+5prHadzruTADE7UkI/tZFOblGpiBcBooHtgmQNJ1zLhGMIc256LqhCwWSTroa6XQsVf/ywcQne4iEFdRgwyK0jW0PGWWcxG8W/9T6htP3ICLtHyQzHNP7FRClkhGYlgjg3CBGBLLOT1zyJNttecN/ocCKtU/yZSDM2CoeNxTO/ZP6/4D2WEOC8R7MNplDZXyopwLeD3nDQQ8MYgxiC0rlJVlDwjR57zk5yyNBZwQBds4akv0hUxtDBKVwkTq9h/p0OTjZN1Ay4DF0bv9dgfgbv6B04HEn0PmOOVMWeOUXt6hk5LJmgk0/oTukVeFEUTpjRDGRCSd9zAHs5xSmhhNe/oEmHAxuKoSN79bji1eNKM+lLwsM1MWMtMQnAqXk1CQn/w5iHk8DhPButFj0ua5zNobGnq753pMJfU8QleiIKAAk6Q5oBTCG57dqgAmp3xfdV0UCeK50+jq40ta8qSNmjtTbRCecsAjzWy4prKXj9+hH3LSu6YojatAbbO5wAiwF6zSiV5sUigBtOMLkgqrPIugR1ICHpo7cOBzb0Q/je+4Dz+TyMsg5FaIJbvXzNe/r1Ht9wkfr3//7f4zu/8zvn78d+6c/8mT+Dn/iJnwAA/NRP/RREBH/8j//xr/n3KSX81E/9FH7kR34Ex3Hgt/yW34IvfOEL7+2pPulj6w23nPGUDzxl5inFaGBjQNSY79U7he6admZnhLN1VqemjntVQ9dcUduAjkYmzskCS0bjKkpFPSq1V+rvxkKFszh0QW8yC1M5KspRJwW9awaMs+o84UeMOcPNhgfX6h2uweExOiQIQufPO7pwb+QqWqmwkVopEzxTI1T3NAxDuy5Gp0asDrhRYKxM5foY749SAcepyUGjs83pgjEeo7P0lpBLShGXCx3k970iOOL7uVEUnXNVtb+G8gWPqzXs0lRo6qAT656BLthymRoNJpbSQSGGgOg9HpeIJTi8WlikrDXYSsVWDI7qUbTpKMoY2xuZVLUJPnjeYJ3FZVtIcVaodiSkvnm+437fsR9FdWYJrx8uMCHQwQAq/C4V+1HxdCfMedsOOnyrq0DyzM1SK9PJkup2GMtaJMeDjMWSRbE0dYavDXvhFHLoHqCLIKU4P5eiouraOqDd/thHhRSm9qXZAlS8gLUxzVatJ4FhPIZGpvEiYihoYXL1novmuKnI2nskHxA0oTd6j+DchIJy5V6K/3Y0Dx2t01pnL3G+Li6bjDo5jD2a7to6Waai/2vup7RhcuNgdEoN18wrY4F1TXj9+oLPfvYVPvzgSheWJTAFIQWkha4cVunuxtkzWHtCv6AzuSNhpaUACPCq98msXJZjTuHectp/SH46j5tOI+mhl5yu/NaiWXZgEcDiPXoC1hzhLKdrsuqE9HZjcN8yLgsh/esaYeC1sPF++uCDV3AxAN7j3TPlLU/vduxbxv15Ry9sHBcAkAbpGUfN0K5Fk389HpyHV2ZpjBEPrdORR6ezJQR4a6aQuQtgnIeLHiEMGJJ7YruHT3TOf8NF6g/9oT/0/u7l6zy+//u/H9///d//df/s277t2/BzP/dz3+iP/bqPZUkAHB5fP/AGXTIeV47rjx+9wuNlwWOiEr5qN8zlOskHtp8EBNsaUCuLDzO51evsDDi0Vj2/egeqOUkLSoIgfNcVSmR+UWsdeS8oyuAre5lsQMIoYx/BiS0NKnEMsAJUA1xjwENwuCaPIB22AHep2EvDm5JRjEF1FYsIgtD2yAeHRUhHLkoLD540Z69CWggnwqoMtkFbZfwz93Y5V0Tv5uTjjZITvIMNTHQd7Z0YQqKXJeGj148MTgM1Rw78M+mCvSqrTcjG8zFQk9Ea0AzQGwkLrcP00x1cnO6+LKcF5yjmXqLe/N5R1a47KQnsPJfgUZrM6AS6UwNH7bC2YjsqYxT2TM9FZ+Edi3qVjuf9wLvbhufbQYPh2rEEhr2JI+YvWiQ6gKKkjFwZbMj4BkLB3XFipw7VAOp7MLR7Tvdb0Ts0w4NoiwFbaeqoDZjOaJatVDjnUNSuZxQ9AcWVo8tnFpM6neskDGuV/DFsiEjhdA5zdzUKnKhmqbQGo0GCWfdjuRTURkp08B4pRFxiRHAOD0vSL/oIWmuRW9UpgISksWPpFmiaRDt2R2My4l6mo+eKpqxC1whRD3cPDPKJd4gp4nJd8CoXfPT6AUGtoIKKnD98fcFHH1zx0YcP+PDDB6SkgnxvVdj+QpBvlMUxyCEv9pzWGYUhHULy8899YGGI0XNi6aRue0N7IQuZPqHzPejUcgEnWYbvxemd2IRM1dwooTGWe6KobD1j1MzghRB6xMe/Wulxui4L3r67k0CUfxUlN3UvL5DWUAzg0OHQIRg+oMA9V/TbjmYZivrhstDP1Fks3pIqvx0z9LSqbCKXBuc7jBOE5GYOnHMWLXztdPr1Hp9q774YAwCH63WFdEEJAQ8pYr2sSJcF6bIgLhG9VppD2h290upnqOhlLGc77VhoAaJdmiaCDhPzCTUMCGRAXoNUMYqW6RC9uXvtaLnS7XovKHvm5CLj+h8MJe45vNXR3jl032GFWTtp5DOJgXSHjoYigq3RJr90gY1BPegsnIPmOdEgUs9Srvv1phYtqmM/xi6H7EjnDVqL1BJ1AYa6H4MyfC6vB/piNOokOofHy8r466KFpvOSL8LDbngkjkiO1qjjkKawpX7xvQGZl0pcMLNIUbkePR0bktVDwLBTC9aiOuEu0XVYTSsdWhQyNrlbGc9paoTcoIErtNU4xVhjkBw97nroemidDDBa/ZiT0i0siLV1pqXCoLVhecWRe4hSp2+gLveDE4XPFDZrpC8Man1tCn1qd08dj
0KgWqRm8q5Sk0UhO2PPi3pCuF3moW/tmec0phanu5mxNy2tTXKR0+t2jQFrZCjoJdKKKkV29dBJaDgyiD73ORFhsAtPNp0Z5BJl20JZqBADYzust/PznE4wwRNFuSQ8XBbC+V3UbzLg4bLgellwvSZcHpLKKTyLjjYLg+E4L0Dgayi6M+3Xa6SHylQGBGyN0d22TPKJaWyKj6qymVJJ0AG1cmecCOFAFiruuK4pInYmMotS3Y2+737o3ox5QczqsGLhzBlmGmLk+ackHasTWekKuxpRH8gRc8Jrs3X6gcba4FojbBo4LVsRFFdh29hNYjY+w3PQj32uc3AxwAeLgPiJzvlPdZF6dVngHgNW67BvB8qWEazB4h0uupsQb2FcRIgBiE6LRgMUC0cV8vd7h1H9EXqjRqW2SYV1CjcMtozpJ2wDaPdSVFfjHNBV91Aqyl6Qt4zjftBypndY6GRmqMuR2s9uUFNUDQi7LYFO1kHFdwXUoRQYZGE2D3DSXNfkUaWjoyFagTMNUg8005E3YL/fsQWHYzumYS0PgrMbtcbM7KFotXDoDSaKsYxDZGZRicA2wWI9Pv/RIz64LvjwsqDmglYq7rcDt11wE3bivQuWWpFrIelgz0DtjLQ4MqRUOjN7B5/CdMOug3LfhZ561mB1BsECtncq55XpJqCy/6idOyVjIaafcQzWoneotODUCFnHjnpJAetK6LLWjtUHpBD4b8Xowa7TZfKIBkhLxF4aQm1oWeAwvPw60M20SQIwCRhJWWZ2Bh3SiSIGh4swdM4FB+PLzERybhgSKzxtwL+jot10iUzeXSPiGuEXZbWKILbG9NfW0U1BN3X6MBJqU0p8IimjR6cC7XrCx12tfxSyjs5iDR6XSM/Ax5RwSQHXGBG9nXteUSH72MOMzKtFoe4YA0KK8IGyEFF4HLLPHZGPgQVYyRRG38cQAy4PKz786BEpOPRScbsfeH4+ppbsc599jQ8+84gPP3zAw+vL3D+N/dcsRuMXnZ4mh1Gvf6twakiRWkLH88DVNmMwWmszxVkqYdKcG948b9j2jNue0TudJAhZ++kxOdBNI4AT4Brow2cCpR9wFjBuJmpfU8ASzjRuSgfoUUizazJBRaHFJQXEYDlUK2V/mN6uzmKNuleMAd2Yues3mtGVPF1vAgSHtWi5aOYdGxhTDI5cp2VTah1OzmJl3Akp/68en+oiVXOGj44W8UtEE046rgvqfuCeMzaNdjdDXQ0u8kYXK6YpM0omm2hQsYewTUQzoywnDduF05XuPmYAm0Jn3Q1nhEGSaOiFEQitEsbq4ChOa50+YQ8ASqJwCDqheaNL1jFdKPTGRaVDNwY2enaxIcAZnRJqRSksALmwOHprcN92LDHgODLx/3YadUqj0STpRMAQW8IwkkQwLJQwCzQPG5kwgxGhoacY2FVwAMgANnMAOJX/XfqE34aQ0im+YDuhRXimocboJnQ1DtJWqB8jlALCJQKlTWN2pJy2HFJoOLIBxEK8mxOr0w72fKgprxIZrkuE0OETyXmsIei/G9Y8vAa8IVyUUsCSC2oJKEPAqBNKUwIL31c7tXlevx9A6nQHGyJvDBZrcDUGPja63oMoQErcYb68DudOaXbOw1VkTFJ8DNlAiH6SfoBBIddhRicFEUK5BoOlyql3+k7K6TdZGx0HvBnedXytrVLQO8gWrVGrZhQiToF+j+tKrVJaoxo4q/tIp1OHaQJjuUOzzsK1Dlf93DGji+rNPPoS8XBZ4GDg+nDvZxEYScAzbka1RUqj43nQT2bdSdN4Uaxw6tu8J+JQPffIVt87iGFOmZA4kmvFXqiFvB+ZRJje1RbsBatRm4kBDTcRQONKgrUa/uiAUQCSVz9PN3+22uISsRn8Hr12nOF9kSITeK2weC2WqMTiLNcLTouwZWMcNVI+6H7VGYaAOm2qmqHV1fhpQ37zHvKE99Qhv+7jU12ktnd3xAeH1TlY6yBiWM3zge3GFN6tN7qTh4DHywUxBCyRVHTYjq70TeLgJ2TQQDy4NEGRqnCCQicpwGOEv53slloq8m5hXUNzdi6caz59+pqSLACgj/BBQBl3fBLGGOLoWlQDBE4EUmnxIq3BdIGHwaqU77QmPC6RF5wxyK3h2A/s+4H7vmPbd2Rr0XvHu6cbvLXYtgPBe7S1QRod1Zs6vvdmJ8xhoYgW2E29dACHYFouNc1FMuAeKCoEdxPF240WJvX+6p07n6KQQKsVDULYEELZgOfBuSxc/IfEZWvvgrwfJJ9Uvh9mTHj6HAg9Ci6RItouHsdB2yNnSGSIfiQ72/cO6JGQuwSPDy4rkrFYrVcYzmm6rdL2dVkfDDUhlzXyfewdB08nGBlau2EczMLk9Wcm7+Gtsrf0wHc6xXpHUWRpgrQfs8hddQ834jeMHl7cywTEJSKmSJZlUCq1Tl5BmDQ77KROzZp5L2dsMOq6DEibjgy9VqUSa6aTcG91uAqHAnSa01Z/0vdz69iOjCNn5ELfNmedduQRj5eEh4cV1+uCy+OKZaELDPTn9zzITwbIGo9xMO3VeQb/cX9Jp3YrQLnys0vCAuA8QyDjaE7s+8nY/EBfsldHQ4YX17zMacsYIhgSPAwMaia9uhozm7sugPQhyC0MDbzvuB8Ft/1Q0gnfb+ZGydRHHaVNEkmKHgGeLv8Ka5oQ4DyLclSrMaeTvdXn4PQzHJIYK6SbryHgkiJePawowxDWOURrsDiLh0gt3rImOk44pi/4F42aBdTQGbPZG3A7AEA6RPr82ZO0JS/iVX6dx6e6SP3X//L/wWdff4DPPFz5pjVBPzLyfcOvffwxnrYdv3J75lQSPL7pw49wvaz46MMPuGQ0Blumb97RO7qxcMEgBaM4LU0pW6f/m7VM8OzWAp5LQKvOzK000nRbnzDAEAXXnfb/g+HXFLulzZBGhAMznCzFgEvikhjSkXpDEIFrlTkstWKBwDog+AAbPOKacFl4IB0KLRSNhDAwdDJwDjFGXQy7qRUDMPVc3ahbgjmL0SiqnD0Vrx//A4w56J1FGuAh4jW63prTHmc7lD3ZhZY0htBdaZ0GspW2T8GSWRWAeSGPbBuP4ZKg8KweImQe6oWhr2umpVqLa/QMaysVpRocBkqVHjtHhe308yBVHNO92QSPcNHdkar6B4uM1jlWXRosXl0irFDI+9y77iMzWqmqNSI3dwkBFho7D+4vnrYDDcAigsclMZRujVgtG6fHsk5aONmCBs64SUmeMghr1YjVndRtYEK0XjiRhVjRq59QM3T35LxSiRvdVfb7gXdvnvDm4ye8efMOT083Bvep1q8Z0CLnKIDQr+4Sqmp/ChoYL37bM7ZMj7foycB7uCa8/uCKD15f8dnPf4DLdcEHHz6QxGANyUa1a4P2wlJJP2sXaLRL0gun55rVJLfLFAU3bfJu7+5sTC0AbxHXhMWlCcGP5msgAwPp0P899Y0110l+6iPhuLMpmaPCQAwap8gtZxanTAr+oQ2LE8yU7SaUMrRGl44hTxFJaF2ntNpgfYVfmppjWxjj5i5yFD3u0PU16GRW
c4GUiov3kOsKDzONpZMxTATQqSpYi3WJZDl6B7GEDZ01QO+ohY42vbdJ/vJW/Q3dGZI4WJnDgKCrbOeTPD7VRerjr75FEIercQgLYbFeOLXc7zuebjd89c3buSS8mADUjktaYBz9qLZcaO5aG6CMmeAcDTEru/wG6mWsQmzycqlruF3v0iFGl5XWzgu5d+qpBvtvLBNFl/MGmF281SiAoGSA1i1QDIIAvsvcm0lnSNvYG1jt7JchYq0yoS8utD3WxM49pagWJ34uWl92WdKGb7ooPVg0rVhpoy9ErGNeH8V4LMEBvDfLj+lg7GKanC4Hg3FXqk4YZE6zGDgKMgHM59ibGl/qlIrWp2Zm/EV+X5I1xgRIMbbVTCjLADdzTncylDljL/EC/RvTkw3nstopqcbqQWvH4roLluhRi0crDbuzaJb6pdzoKjK0TN6qGa05oZ0tF+5YnMVV937e062fURtDa0Jq+6B2U+PH6RSqsZEBT403Wz+Tca25PhJdLXqj3Q90Mhh7sa77o7xnbPcNt9uG231XgfSZW9ZlEFEI9xVLFmDtujwXNfrV67/3F/vXFHAZU9TjBZdrwnpdyECFoOam9xamk0ctbRI+KO7lbtc7C+/thNbHdTL+TYegW0urHm8RLoms1KRZY+ZlIoBokdLsN607w1GkHoU7yTFYzULYX7zp4x5QEk0jSaH24WWoH5AxfI96h61AzkWTnSk+952QNQAcTsXwviHqDddWbSTt+5MwP/8xwauzvO6IHYDkPLAucwIKxsC2BlO1MTYGSQuU1VBEMZoWPOKMlHBkjVqxOTVYtva9M0YU/u2to1s2HZ/k8akuUv/H//Er6PeGtQLm4Qq3cklfS8VtO/DuecNX3z7DwSA6hwfx6PcDrhs051CNxdP90NBDwWWhr9+6BAgMXO1oEBToCO4s4C0zZrxGetSGAkysddBiuV/gxVFznYWqtmFTxIPaWssLw1kkQz2Jg4HLDk0a+iEM45OKqDCDUX25g9r0OCAZaLqooIAOB69TAl69xnF9YDBipMbn1fWCdaEly6KBf6MADBJE6zI75aJaDGsMklJ1owpJYdhxnrKEFwwjnXII68lk+gw2mghDJbNa5OTcEG3D4tzE0ZuopVGtaEJsfs915g0Fy45tjX6SW6pQn1aHzsOM9FeZ2HwTdsIGgHeA8cC1rAq14D2GobMG8A4+GGXwqcWQpdPIoGxDb17r6QovreNJ2VP3XHEcauFU9efihGK2o9Al3FB2cAHw0EjocUpPNiCZhcGH6pIiwH5UlC3jqNxmuS6IpSJUz6aoMPPIlwpndPFNTJOC7YWkFBcbmxF7EoLyUXC/bfj443f4yn9/g1/51bf4yq++xdN953PuY8uB6QTSxq4KfUI8pP336URAqUPEelnw+sNHfOY3fYDP/qbX+OznP0BaIlaFTFupKihnocwHm8pjz0rd7rPJC4GTl7dn3M52P2gSfd/pG9gbdyuXhOXjJ3y+VDy+YupwWsj+G59/zXXukms+QwzzUVByxfa8qxB62JCdJJax4KbeqsMqM3EYE4pe4GRZ8jC3KpmoIrjXqrq7PBm/APdtduc1aKzF9eGCy3VBWgMul0iIeA2E3ETO3VapyLni+b7j3bvbTBCw1uIxMX2ZMTMs9v0okCPP1GIKntS9v9PJg/sy5rcx04v7Puc1r0wnLqdklBEDYxybiHLkT3TOf6qLlLTOPU2hvgmRMAqFksROowskGFgLKxzV9/3AIQa7AG/vO5p0dsrWUlDqeAGIsQitIQhQ9ea1Timb026Hk89YoA/GmFgyaAah4oyS1l87u56BN/PMIFMNjkvKBo7+ThqcMhBJnBjJmYS03IC7FNZwABbr8JgivHPszgJ3FzEFLCnNnRc6b8jWuK0tlZEQpVba+mhRJTTpdBk8LkI7p0kDJZK8h0dj0sgtzEySdQYYgYq9cw9WVNEerEHSAg8R7IUO23spKI17jT3XGZGyBE/NknfwhjT3rOyivarRpVFKOMYiWqazCCC4l4pQKo6Rl/Ny52bMC+cBOo+QncTCdBYp9x6BYDotDCacvNAGtUbNlBmfG4MOOwRiDdbKsLvWyUg0uVDLZy2QAnOOjJvbcOmnSFsAGOnz541dqTEgPDUmRf18xjUrOo1Ar/PpU6kFJpeG7aAB8p4r4ah+HslT0wROws5xch3GxFX42TjD60gApBRekCUiljWS6KENYKuaFDAiWbYDx55RSsW255nXxF0Ima1O9zUDxaA9VcZtO/C0cU+dpSPmjCUXhCWh1o7LmtAuCX1loUbnwd5rPQlRWvCPoyCPItVfFKkxxThLOzYlY3jpCN0jFM9Yl0Yj2qFtGRDzmoLKNTqbnnZalDUD5N6IpABQSqkmLdNnkqJtMh9HYRjMwpqpa3u+b3jz7oZtO1ByRYwBl9XM3VpQ4fJIHOAhRPd4kY5d78G3+zGn5ugMovdwanIbU5xpEvOsBt9PAYDDoHeP/BuhSBme4rNQTfGnEg9iiLikBU4EwXBCkdZxvx+41Yan2vDxfUcXwZoYJ3G9JO6JvINxFGZ2GOLX4IXYeseRK263XYWXZIGNm90oIWJwgVo7hXtFP9jWAeNocQToYaE3+rCuaUbQhBembXQzHktIPhsQP9b9jOkdMBYBhuO5XeiiDLL/rEKD0GJjO+izVYd1qPAQOgq2I2Pbi0Z4CEJwGlmuwYC+AR6w0F0Mzr3QYIF1UdagAN5YROcRXKWiHrQ56lVQrdEoi8Kp19oJD9w08PF5P7AXhh/uRZ2lrcN1ibh2aua6IYR3V+ud5yOjawF3uhjPIylXoCGKHcgVPlds6gVIYoOSIlRszF0NKcACaBdsEZbhLO6mUXCr7SxSRrVTL4pULdxlDPYVBNhrmd/3qtdIbQ2mVtQdp3sIBKZ7BDOcP4ahJ+HKDihJY0CidNgHAJ/LbLSMRrEPPZQ3AkC1TJadblfotzU6Xmy5Mv1YHS+qQmCzKzGYB/UQJAene7N+6n+CYUOzLgmXlZqly7pgXZLC0JYZUEfBsR3Y7zujbJ4ZX3Pkivu28/3UPaa19MbzgyWnDcOm1/LTtuPtfdNspkqfxtsOH+gGfl0S6lHRjkoURJhYPHZhJfNeKEc9i9Tt0IlZ5ucdF8b5LN7PgmUUgk6tYVkCqgjWNcNVBx8oxHYaYspkhArrPc0GKj+TboBdEYvWlG1paCUWop+OOi44xCXOIlVyQS8M4dyOjDdPN/zqm3e43Xf0JriuC6FkaJPsh98iUEtBl45WlbkMqItLw8e3HblW1MZgzDV1QrTBI60JQeNEqoa1ti6QUtWFgiuQ/TdCkfqmhwd85nLBQwhYnLqYW4vFeXywrPBigNqZc9O43SitIe8H7hokVkvhzQXPiHBDb7+gzCrCZA1udyhqUPru+Q4Rwce651hTwHWJ6n4cpjvFYO4Nk9Pb/cDb245SGgQG6xLoMqwaDY7VHbZ1+FZhKnOkUKgZEk27HdlAxo7IdxXV4rQr8or300KAzJwG0GmhMCcm54qU6KxsvEUTwa+9uzHT6r7jOPRwcx6XhRb
9lyXCOUJ2tp87HKuj3cDyjyOTKLIXSCVk8HqNCAbw0nHbndKRqWsq2qnz53GS6hB8fNuwHQUfP9+ZrltVCW8tUjAwtcO6jr11NJ1Ib7nieS+cknU/4HV3BOvOPZdX4kIMMMGjW1ro8Et3TyNq3VKrBYx9uLCTDeck1YoBTJuNxlhiezU2HXHluZ2edSrAQevULXnr5783IHyI0tCFNk5ZBCYGGCXojOtr2C9h7MksKcU58/ruranxKsc3pzvFVjU+AYANbkJQzQIoOCGq8TDmDMgDJonGO00HjgGvLyseUsSrNSE6wrFbqUCnxMCrTdbr64JX14VJBKrJGQkDec94fnvD9rzhza8+4dgzbs8b7kfGUSrueUz5jfRtY/CYkprUeugZjSMX3I+C5y3jzW3HXgqOWpFiQGkd797dYWDwZn2Hcj+wr2m6sQzSBJQUNKbSUjTCJzeFrRUVsKKFR10iVLQqzSmVv+NVWUlXt3bCnzFRdxeCw6HOJ6ULfK7UB+rEnTOLQt4zBruD00zTXbjamq0R1hLiLq2jF0ajbLng3Xbg49uG59sOQNCMQVwTbrnw/u1EZhwwz5gxTZdOFOOeK562g1ZdrSF3JmU/1oaLkERjIOi9YS8ZWc8CJjnYCQfuef9E5/ynukg9pohrDEjarQ1VtzMGi3PoPuAaAg4Auy4r6RZhJivJgVNMsOeXV9ovnCMUa+ifJhDc945D3a6NdATvCKcIpnffjLDQLmoUt3tmPHzWIiXgYVFqI+lDJz20Rlv7SiafKO2863MAMOnBI659dvjAPDicFlljlZHYhS7KarHT+kjZdUDnTXPfDuLWzyymMMASBcG7SY99+WXt6ekHY6bmquTKn1OaigfpQYbY0VKcVjHSK5f/Qix+LNsJkZH1d9fQt1IboUsb4GDglTDUwAW0FU4RReG+Q33TunQEEfhOJ43hanCa4p5u+SNiAvqaeFP56WRAYobSyYET7nwBd41/O3aT42sss19qioZPoED3TuN7TUiOqcpVGZbFmhPWdWrcO/abxqh+hpNo130i9L30OUwHCgzCgzq9D8h5xHqI6egvi9P8jPXiG7CnvlveqZA8BjVRDViCn/qzYWgMnB37EvykTo94iXH9bPcD99uO2/OO223X8NED98wis+myvlQyPJ0BgjGAcJqCPV3SqcfjNVH0UHfdnRHvuis5jIHpULPlk60nXc2OlQTV6oiHlwFqqPnumWoNnDo9mlN3uv1Hzx1RF00rJgowY0jUOX5ZCt0ZAjPV+JYfQK78fHpXOF2mc8c4FwbMqHbQChu3SVqpuiM2GDtE6rdcNkDvCDCIg+xgTiLSjBfS4pcb/SR9tZpIcP6MBoryb9uOrBCtFU3g1qZvL78BitT/43LBq3XFJQRmSInGQovgoqLGm7M4Mg1K90JIZYkJ1jpcrcPDusB7h8frio9SwivvsRqDYC2Mhp/F3pGloTTium+fqBavtSI4i0sKuG0Fl8SFr1W4LyQy6DoE9y3jzfOGj9/ecJQK6cDDJaG0itV7SGqwwQNHBo6MdtvR9x3tOCClQGqbOxIWQQd4r/55ngXVntqmuR8Au5qxD8u5Mmm4NjjnmEIrgFjuct68ecab246PnwmDOmshYpBCUJhSYctyMnMG3AcR5KMgHwVv397JpioNUVOCH1LA4i0W5xCdxf0ogJByDVDY3K1F95q23DqeS8VzLng6srpBAGsiXBSC54SidPqmhKqiTMIylvUaPFe7KClD90ka+3B9SExnXhd1ClcHjjGVBNLMXXTzNbd6Muk6VPCt5BhR2ytnLZl+unspOSj0ZpTtOF45pmaHMPVwvNZlc+POTkQgOeuBNsILqRPy0SNourTVsMvtKGj3rg4NDkU6rnVBq/xMDHjIwdLJw4aXdHUKovECXh4xHE6bMJIFnHrDLXhYEj68XvCZ6wWXGHCNYZJitqOwaRBRmy9CtQ9rxGWJ9IGzUG1fwVd/5Q3efPUZz08bnt7ckHPBtu3YutoCec/PFEArlQ2qknSS9wzKtPShDNXDeW3YGjv5FAOu60KiVAhAB+rRsNdjFpqhdetd0GudFmJKAtU7i48xybTq0Kql+Lt3GNFdX3CQHtBqR/TMnBItpjEpxdsa3LaMmCLgGCYI0ewz6bpLYsBnmQ2YTFYloTSZDU5tbDpvtx3Ptx3HUWBgsCxkNI4dPKzBO2Vt2t6xeH52V2dhIegTXRgSBiUJySkfsdbidhS424bSO7acsR8HPn77FvnIqKUi2KA2X/QZzOX4ROf8p7pIBQgNV1ubXZ7pAqNYPlR0WErGdhzYKj2ngqOYb9WLNXiPhxjw4CwWMPjL1gYbGesNS+U/Leh56BVl37SuzKKjQGDQdG9lrUGUPt0ARvbRUTlV9danlcy2H3C9I5QK5Aw5MmTfIceBXgpEpymjjJ4RFU/mkKcFvmZCibFz2hLBpHUXvaDGjTcD5cgdJoGikck0PP1EMKGdQfctpc49SmpePc8seueUUJTFxn0Wv5c1tJiNqgdLUZBKQBcgRnrRiTFYlkDBYgowtaHihDWddTBGXTaCV5q+04gON1lEvQ2VvQoLtaN23qkJqsJ3gV+0PqIR75IC1fwKFZHpZs/JQfQXeRE3rloPMYYGqK1zD6ACY2+5s1xjQIkerXg8T3mBDiZ4oZELXt0naMtVW8N9zzhyQe9naOSwT7LOYr0suGCBi36uh45csO0HCQbgriIrQaXXhnU5GWDWMXrcRfpacqJpc6rprevkpXsl7xBagCTRBGiLxzXhcVnwuCQ8pIjk/WQf5tpw1IqjNU4PwmPHKhQawpkPlY+CfTvw9MSD9fl2aIIvd0tZ4SQfApsNWDRLmvhsSjp1fs5apGhxkY5DOrbWEGJAl47rSpbrw2VRZpvnHgeYpIs6/BxrR1HHFkavmPeKVCfcwslc98lx94iJK4SwhBOKi+dZJUPyEr3aBPH7CaAhkXrfK2FqDdxZRXW3gJAibmEA1Wi24WoDYD8KofvnDe+eN+RcYY3Fw4X7P+foGBGcxaH3a80F1xjQlgS7RE6n/SUb2UzH/okOKHTTO1cIxhi8u91w23f86psn5FzQWkPyEc46REfX/t8YRUraLCiAHqSdcJmpFSgVNVPhfj8O7I02LJeQ4GFwDR6PiQ7NlxhwcRYLBE47A9ca+cnWTFEadUNNqdONy01j4Y8yM44GBJQ6QwaDtzNqIVdSQWutcAaIzmC7H7C1w3urk9QBbBsLVinTSdurkhwvipSPASYEIAZOIeYFM6t3dYNg1z5CAMcOwoGUddNU1d7o3iD/o/YI58h/6EE89D5JhDb+1gDgovY4GPo390Hjgh6EDWuwVPojLoXEFLGGDK8UkJYIlEp7mbEP8k5rhJkRECk4xOCmxY2FAYy8Z9IJAF3x/ugsFrURcsHDRU5Ia2I3f1kiO3BgalfsLFJj98DrrDU9vHI74c8has684dF5iEf1kqw5oJWK6LnoJlNSr2XPDjOp5YwzdkaGv32+K5NUU4r18zeWdjyvegOcwXJd9PAT7Dnj3fMdb5/uOCqzs7JadK
F1SF3UsRp8L5qDTyf010cicz1hrfFzgw8UUFv6ZEZn8XpNeEgJr9cF1xQRdJdBK6AzaRqA6uQ0osZTGuF0es1HwXY/8Py84en5oO/ellFKwf3IdEHXppGZSYZ6LeGENQrVcM4I3ilDziKLYCkVAsHDmvB4XfHqesESSZoCVHqgcG6tRB6yUsF7Izkp6OFOggEZo5PdqHTNGOj76C0QFVEJ0TOF2E5sdRJzBgw7/oxQrdcIDBarQ4M4k3cTinRGm6quLOLCwtq7YD8ynm473ry74+3TRrcJa/H64cIGwTsWplLx7t0d237gfj+wa3JEMiDrU9miIqcNVNAolHE/D71eUYeMN093PN3v+NW3T8iFUpY1LvDWw1s2uqX+BihS9ciocECrM8J5JtzmDJQCUyunFBg6Slim9CZrkAywGIMEYAUQeocrFXLb0HOhu0NiSmnsHYsRXLzFPXjU2PTgtUiRZpiwDqXR1sdY0BXYQNlGJEG31jQpN8OJwPeOZ2chgVlQLmfYkmHuO6QWtCNP3Un0ggCDFWd2T4wBJkWYNaFpkSoqwCxHx5Ybcim8yfSA9c4jerKhondI4F7HALg6h+r5VRQWi47poTkXHMfBCcMYXC8J10uaS192/5wuiISx+2qiIYx2OH0DV2sRlgazRJCWZPD4sCJGjyVF7EeGDQ6Pe4Yb5qtgkV7UeHQJaoCrcR1GBBmCFBxqd6g9ognxfB5EDo/XBT6QBDHMSR8eL1iXgFdXxg84nVScMxNSY7N67gFaVTrynvnfeqhPV3k12rRCu6RrCkBLcBDctwRrBDkXnXrpcH1ZIl5fFiyBE9HzduDdfcN/f/MWt/tOc1dRKYTun4L32NGBYLG+WrFKBDrwvG34tbfP+MqvvsFtz3DW4fl+UGd0VPQPmhZ3Bx8bXHQwwcFVj9A8ajnFsIDwOtCEgdevix6QwCU4GvwaZmElxwILGL3WO47WUTStdZjTwtKQNyy0ujKOi/79IHFg2/PMDqsClA4cGrwISx2cUxb+YM/V/oJB2wXRGKQlwi8R6+OK9WElVAhhHE4KuC7LlFPQtxKQImidQYLP24H7RiujrgzaqE4KgIq0a5/MxqNVXEul00kkhLm+WjUxmIngpVTwSuXzdv4s1qMhOpRif5SmRtUFRgTRWrxaExbvUGrHEj3WJSJ55qB1ZXPS4aXg6X7g3W3Hm+cN3jlc1oSH64rLmrCkgG0/8PbdDbU33I8Db293lBKALrhGD+keSXhP1y7InaxYa0g7FwCr+oZeUyLUDCjD2SOEBFjPAm89ml4Xvctknf56j091kZLGLqMbMNZc1LpkQH7CtMgAg2gdRBNQRwrttDDpuoxuHbANotRLOiJ3IHi4xuV7svS1qp4R2bx5w9RZjXS0gWvPZNP+8kuFga2hFoN8FIQuKM2h1wJXqna7vAFqb6RzG4tuG2I/F7ZG012HK7JRTUUTMneOzvjxuzLn5l4BTNEM1syu0BtgcQ7ZO+TgkWFgdH9kDQ8R7tN0SWs5NeZKHY5RRTwdDexEyIYZ5tAWWUsVu+sdXckIxlpcrwt3CIHuCrULrpeFTCktcM7ws6SDBO1gxiQFEbjODjE2jxR1ILIUwcbocb2S5hzWOIvU9WFhsYtKGzYatOc0qmIsx3XfSejznC7Hvq+3pgasPJB7G90nzoyo4LEGj1o5DY6U3cFKW8OYKsw0I70fB27HzoA7DJmChW8eoXdcMxlvYy8BY1Bq039LVqlzFus94Xbn7uhYCiR0NEdHFQ9BLG1OjyPjaExPzEcKWJeIh+sKA0ahXLxeQ7Wrh9t5/bcuaiQ8jmNd7Cvt3Q7o1Zl5rZyC8kkRmqQcQJu9zh2QGf/vBXmAn8WwThppwScRqhHHJYkheCYs6/dpInS2F0z04SgNW2HyN4uUoI4ipXB6rh0MjDwz6HIeLMCmBBs795ssTFURO3mPfDMc7gdsO0JZW6NMxFqDJZINGENHUkblsA0b51/vZ8J41S9jiQ54ndiXFNBaUwIO/02pDcVZpZc3vtYpn+jkcrWTqOPdiBHiHnDoAoPziCFgXRL8eP5KLju1ir8BvPtMrYCrEOkkDbRTQGZ6hxfg6j165KFya2TUmMbdyk3AD8B7aouCg+0OUD1J23aYFAHdE6yt4UMLuOTwaCOaCypeS3RD6IL7UQhzACxwHeilo5WCkjN6LSfOrIaqx7bDFs9dS2vwrc7u/KlU5FpRWsMldKytw4cI2xpcG4VZXdm9FgJL5/BiCp5rw/OR8e62s7vTUT15h8XTm4vaJ+pXPlpJ412DxyFAtwY2BF7oteN2P6bWK2un/OqaAIlwNsE5g5g8Hl+tM/JhTGwx0q3CB4eLI+vplR6qxhmkFKe2ZDkK0hrhg1LVc1UzS6MBjNR4DfGwNUZp4QAcoZVFGXHOO+67UsCr1xcKmq8LvBappNHuzmAKpX0iKcMFd1pc1TaFjjQo1p2ZOX3dBJh5T2Q6kfFJEkWEh8H+cIXXxXhrhF8/WBdcl4jXS0S3FkUwqdNPyrhkMzDCEUkUiqB2Zc9FNUQFxpDSn0vFUQr2zHiP251MuecUcAledW8evnBX41NUxibFtDCYZrXXhxUf1o60BFyvC19P8IgwMNJxvLtPE+WqBe50PD/htxGXkTQFNyY2BtD9FQ9uNlLBebQwRNdALhm5sjCUI9Payhoc+6Hvo0E1hgGNpaI4h9YEMbCRvFwXTvNh+C7yPhG1W9KFATooWL0dhYGXCjlOH0mrTiFdJnmAxddOZ/w1BVxzRtTdKyxjRYIxsJ73zxlM+NLQlyD7aGK3TJeSWhoCDHyI+DDF6S5Pp36H67owQkYLjujUb6yFDR4uxilId85rWKJD0YI7mK2iDS6tm7ToQ/eKueJWOo4OFI5LiN5jjVGd5S3lLgBeP1xwuSx4fLwi62716Xnjnqxu39A5/6kuUs5AY81JAkBrM+Z5OP0m65Bdx+I89lZpq6OisqyO32to8NZAxMN0p8wgA9Mbv3dr/HBbx1IyWqsI0iHW0ynJ81CpIpBqQEs5ZqeYrh5mpcyR3RvNfrK0RBosKuld4SmDQ4BDBE+Vk1Culc7sxuBVrUjTuYIXq1HmDQZO3DsQ6GxeYZC7qBmrTJroWDs5nUKsNbjorsE7i6Oz2DZn6d7QT5sj0n95EOZcGdIYqd2JemMMb7sx6QxDUa/FFMaoJRUAa5SZZucE4yKXyVUhD6OsKtG92XCcPtdnAiseXndlVd9L7x0WjX54fLUipDALoFWmnAGmGSeASeW2nhq2kZwqFrDCAmusgddOeexsKBOgpyADEAHBWDY72CC4LhEGFDq2yibrcSH8tASPou/xmCK463gxQXlOmzGQ9GNBg+P9fmBLx4ysiM7huiyQxn+7xIDgaPzba0dDA9T5xBomSUvgm2zVmkoM3TUEFHimFHBZ61y4W534ZS/0Sixtmpm2Plzx2XE70Ml7wLTB6m6nn4w077jD9Z6Eit46mieU22KERUZpQK+Eioq+j9Lpmj1eAACNmElEQVQ7/cH+h2uzlDI9Mb2jA/4Qz7phbdbZXohwg
hyVostJE+9GIyiUJSpG2YRyum4Mf75RYFodRIaK3kg9t8ZAxj5Iq5JVH8iXYZD8pjIp/8ZahKCGr9GR8CQdpvP1pBQ0loW0/+HusQSPS4ooV64n1iVy92Uw/Ta5X3TwISDGyKkohMmchX6Ok5jRGeQJ1WkSYjU4Sp2aTW852YXgsKuh8N0czHozoyCfO+//1ePTXaRAxwVqTXRfYMxcsFtDz77UBcl1WLAo7brEtYbYeqkNyVlY6XCd4kwZkJUITCWN1/aOVDLQG5IIrAmwNETHAYPSgeoMCoSiS73YWq+qXm9w0IW8+mRFY2dabhdqjhqAA8AmwFPr2CpJFwMqyY16oaYHsQimU7Lx9OKzXWC8m2SKqm2qCGaXBJxaIDPeS804Sr7jaA1VBIdwGV/kNNNsOlllTRcNngeK89wNLiloIBzDJwcraIj5Bhw4HNZhyEA785A6QuJh3HTfY7QaDGLCsWU9CFQ/prAhtCMEoItpx9iHJeDyuNKaJvFmHeSFM1mZvx/hesYaWFiIochxQH+iN5sPFHQqEA9YXmeoHWJIDoERpb0LnGE+lTNQYkKB9IZHDZhcPBNeK0hpHpRfq1/eeyVYRCTdzVnQWmq7H7jHXbONCLlclwWDVL4mEkOcQDPNyIZt2skzQgET3jOj8/anBda6RGV4Gnhj0I5C66JnD6kN1Vp+Hl1d2fXaDNqUrDFijYF7RWsJD+obOBoKH1igamE6dQ+dVOgYKHaH4KhsNMukhQtMsEqe4rVZHPczjCp3CJEBlEEnZGaTqSt3Z4GyzZxkToXjRiEaxYgoyfn78eDleWoIh/tIU7r4SDi2TkMIR5EaU8wg4EziEm+MAbn6SKr+dQ2aJE7Jg3XcZ/no2QAqLO4NwzQvS0RropRzr/H0apQ7CBjOzetqFKnBIB3nxiy+A5GSk4loKt1gxlToR4Cnd2fwojEqiTnP50/y+FQXqZEbBO1sxhIayvxxAIIBRbqq+O8i2GqdXmeld2zFwxmBtAjTA5y82DE1AaRCCl3IQ60wnXoPcwCmVdje4HxAMxbGCA4j2AEU/XtWiCcb7xEi9wBo6mUGcKdkDOA8YLUj7x1NgOz9dCMfjC6AEeUVBtU5UtCXCLdGRjNbAxM9igEedrpL3O4Heu3qDG5n0qZRGruzPMpc6/CtI7YGl6m/kEpleewdyZopcoYxqI3fW/T1LEvgxLJSs3N9WKbbw3COtzq5CNR6RfcPI5jPB49geMMva5xmrexFqPRvjUWqDtxfsf1RAEdshrUGPhKy8moZM/ODAABkRVG0yIlhsvj6iyIKTJr0YD0aQyHmELQaY2CrRi/4Dt8EkgRGLLyAB2BrsClisWSS9hLQeyNl2zt4pzsgZ/H6sqKJ4J4zAC7cY/BYQsRlSfM1tNZwu+1omVEWa4q4XhI+WBY8pkQ9lmDGuzOQUh1DMPaycnpCOjehUAjfg6CBelWTpkU/k9zpyBAii1T3Fbk1SOe74i1Td0MkDfuyJjxeFzysEVH3b+ii04TDuiYIgFdbhjPcRXoAzRokCFZrkIvHPWfujGplzgUMFs+QPgFjQVAatj3P/gHGILTG6wyAcSSOiO4hB9ScjoLaRR1kKpIyc5s2Vc6YudN9ecwaY5hRpgxRMvQ69q3AeA8fK9EBbVLHOTU0D8MwVxoFyt4YXKJD84TQHi8k9lwXj5YLaqFBc4fQXk0LIIsCD/fFWUhwMEucdPfg2BxQ79iRS4eIgXUea1oIzQcPgLHxUglbdx0IrAxNWkNHxz0DrlrqpQyJUYvCuN57RpKUplR07i45AFh8ksenukgNoaEYAeSlA/FQMGAG3HnV0VDjYGaHVDvx51xHqig7WbEaojfgJP1ZDqAGA0pJbg2mFEJRziEJf7aAbbcVQcO5rxh8FirU+UwBduZQuM5Y2u243hl7rz83eXMWT0NPOHEc5WzwsJGMNbHMXYopYFkCco5YUkSzdK6YQlBr1RPuDF+c9Gthke8i3H8pfBkN6bzBidrH0K7FgFCNel9jqQv8gM50UT6TYx1hFvOiFR2fh5ExERkVReveoPVZPKyr6I0hf7XWs0gZtSnSvdw4dHxg4XOe1PMBN5xCMMxpqo/pFwADtb9+tzdd78cewZxOFdMRXvcUNP0VWDEwYgnJdgvjPa+NZua0qSs1ehiqK8MaI45IjUuaU1QEjC67W0HWTKPrnmGNwcOFlkTO0e4K4B4lWPsezZ6N3oC7zlFxfFYY7DlxCJX6OnE8nHofM9owWGWEyXBBmXRl4VTmnMW60DqMbvpuXhdsXniIxtaxLFGLbgdyQRUH0x1M9xpt0qldfPGZRI01B8ycZHJtsKXCZwvnSR6qpc5UX0KA9Loc92gIHiHUGSLIIEFmLg1k2ep7OdzPeS0QufF6/Y1zZjBBa21KHtIJGRYG/aQnNiVcySkR8XpmiTEIKmexxlLE3DlRmkYEpVT+jNBPpwwzGxBeyyMjq0HUl1O1fqLTzXzeZPGh03psJBhA2Mp5c6bwdtVRVT37DGjz5ZxFCLQyK3Nq67Ohc78RJimO6o2H9XzBWrAMLydOUwaLM7gGRo/fqkVuQO5j0a06ouY4OUhnldcdxRjjjVGPMnumw3YR1CMDtcFai4sPCMILLAiXjg20QdsA7I1muEdpjA83DtZQqNr1ZvHB4+oMQgoQCNpxoOeMV4adUdKU1e4sTIqwa0J4XBEvCS5QiQ/vcDHAB/kKby3qUfmV6YyxRA/nvBYsZQYawFQy1NhCsUtGrXBNkLrg6i1fn3r9dQBP9wO3I+O2H7iuaYaktcZAtnGQueAHJqKfn0wmVmt0Wu5doRhlTho/iojMX6JwcZzayZIcRWqmJQ8GmaWH3oBUgLMYvWRgttrQcpvJxCM6fUwr/yN+Tl83mRMIdyFtZpON5+S1sw3QnZeQzu2FewkxSk4xoJ5H/54TwSU4tCViv14gMDhKg7dONWKe12qvOI6qhVGwRvrWWbni4h13EI6sS1G3hIlP6vOh87ZOq/o1Co+x2gyJoPsGaXqd6P5EdQ3aNVtG4FgKbLl31b2kwkyXhwXrdcHlYUW6JE62w0HbWyw2wQULadSERWdx73RDLxAUY9GaR7JO7XnavD+7Qt+DOdarwB52MgybCGKu6qMnCksthN6DQ3gB0RkL3G87shIX7tsgFXRl4QGLOoMkf6YgRL0/o3dK5qGw2hwOfmNMhTfDMuu0vxIRmKbNJ2Q214s7jY4XFa333rHvBbf7jo/f3gEDLKXARLJiR54dQ1YL8kaLqUGEkTUheg90wXFU5NLQaW4BIwatCY7e8LbRusyUCqmFNm2i0gNn0QyTGnbdP2YNauxd4Ax1cEPf2AXIpUxkYry+T/L4VBepUWCACcDofUd4jZOLmRTYaA0u3uJ19MhdkHunO7e1jJS2ZLrMJbp2NePB/c2Jz7fagd5gG1VGBpoRZIkHB0tiwN4Yl15yxn3bcM8VuQmC9YheYC0v8lIZ2tadYPEOyRiEywIJDlIiLtIZZeE9fEpwKSJeEuIa4VNgQXzhaNEaRZyXJeLDVxfmWh0Fi3dcWmsX
20dHKuM9pW4qt0GMoMbKCLC6gGgNvLHYG/dWW6bd1HEYlEqblGVNEDDGGzoJ+dontXt4ge175v6gVE2StbT/j4wV8cOHbBAH3FlsnIydwrkxsNbNvzuuC2sxP8tWNLa+1Hlgs9NlJESrLF7E1M37kQPygn6uWH4rfO4lN9y3jJIrbjuFt71Rs+WNQXDn8xkPZw26GIx5oIP4/l6ZFH1UvrYUuFsKvnNnqNNQ1x2ivCigXFxbONUEerUxMlDTCKF3Ze8N3HdMdrey60bMvcwdyihmvKYaWlHRaK7TC28KmsFiMKnnhsxGLvYDrg8L1gdqltIlIaQwE66t53sFEaQUUFJAyxXFe5gmEOfUH1OmJMEaM9/TJqIefepP1wRbGTlkfH9La/BqpOu8oxuEU6G5A5wXpDVSqB7pbOItAXaoXrGDmksBJ6qkjucjWNN7h6jEg8GU7CqA762juz7Nfp0b8D4L5MglE53sh3sE0FQTRtebd893CnWfNgiAlCu9JcHEAUDdXzI1ks+3jZ+rAcpRkKJHcoFTpZxOEoAKmbugVt0f1UrEqHGnPuJQ+LcN0LmnKkpYaZ1WTtYahEYbMoDn25zwhl7uEzw+1UUKOMdvPtRUUc58nWbM1CN4S4eHh+jp79ap2A7WYFUsediMjF0XQNIACxeXgCMzChYvLIZIN/cKLQbn4LugdEOH695Q1Z7pngtKM+ieh0xoDc0Y5Np4+Lyw+lmXCHgHExtia9Pqx8YAGwPiEhEWRsLnzhvhvmUeMCLzgHh1XdBiRQt+hu05PcwZ82Dm+9nBG/6oDbkWHKVgLHCTJ5zIOANGjtRSucCWk3r9eD/grMW+Zb5fcl6cZty4Ijg2Gu4euQBmQIFAXQIbBGX8OfWVg3lhoGnOZfUoUtzZYcJ3AKeEMTnVXNUpouinLDOCYb8fbDwEkCTTiHXO6C+mr6Ej6pUdZM4N+8YYiZsGaYoILCKsUnMHrGYAZWwZhanNHHBqazhqIVmmA10YO7MmixBkNkPOULRtxl4DAjN0gAPCVQ/JYeND/RMjVIaVzUt9DiA6PJ9wEaydBWtaQSmsNL8aO+iZ1Cpd/5tFlfohj5Q81svCr+uCuEZaIgXaMs1YC3HcnWg8TPYOaB3NnfCaNXw/rRuwEQujbbzfS+M+uJWG1qH6KEHrHikUFiiVKThlF46wx5QCWmmUhOiZMJqcl0GegpcaOD/1WGQm+rOh6sqIVWbc2De99MEz0mHr6Tgxs7xm8sF5nbfa8Hzb8XTb8e62E10olRIOaymqtQatUrqRj4r7nflPjaMmSgzAqgJrc1odGWPQRFcf42d2FijTGuJYnbyAuweEWMoZRTQm1dZFLc3U1FshZvmkFQr/f1CkgLFW0AMXPK5KbyhN8NQatt5xb51wmjF4jJ6CVu02KRC1SJ5OFM6TfjsPvrG6ENU2KbTUVJgLqAO15R7BeO6HSuc0spWM3hs2tWe6lwqAz4EOxB1dlMzRKlotsNEiWWAV+gcGb09YwVqYJcKucdr85FLw1Xcbnu9UkNOZweNxiVicwwfXld3/WiFZ/fu0kA3KsDEjO6jgtu/4lacn3LcdT883vj/O4fVDh/cBIUbmwvQGUSHp0UQPXINtL4jeI+8F3lhIpbP1y2DA1hlKd+SG+16mpdTDw4LrJeLxYcHDAwW+6UJdk48ae2+5cNZ9OIxxc5cljQdxy4UdvyapnoxAJq2+tHOppeLY86SSGwG6d5q8qgWu1HlAD7uproLrXBrePu84csW723EWgEeBRIcFDLSzIhrbpPspaGeuU0wBsNXOaVvo0H5NEdeLhxjLyPHWlLreYI3goS4wOrE8XC/MRPOeuVB6oM7J0rzcNRl69r3nAg9IZWS7AdCrma72ecvIO7318lGwbbQtykdBvh+opaIeGff7QYgsV6V6OyyPC1KwWF6tuLy64vrqgrRGnazPjCs0smhHcOAI0jONybYN9KE8hhOGMRqsaJEAZNtgDK2YRkCmsQYuW+QWlN2mkSbOIl0SjDVILU2I2QevdG43Jxvo/hZW49NhUJqgtJP0E5XZOl6zdU79NBWO7GpTprDeiIIZTVutnYJ4S7PnLVe8vR3Ius8ig5lF6rZl3I+Md/cMgcBlGi6LCK4p0bDXnP6Ph9o75VKByiKVrIV39C11lv6i3hnmye0FRx4EM0x0OKudW24y7T7u6ku6a5RIkxEjYyA4xfCjLgl4n5pPWKg+1UVqwFJCLi374q6wTmdHt7XGgMPWYDVq/KIHbvQOXh0ovFXlvB34x4vlOPBeNz0xktlNvKQKm5NZZkb8AuZ3OLvWE5JqXQkg0mHAPYNrBsVwNbbasTfyc2lrjNqE9U4T3d3gftfu6nljhPaacAmeMeEvutTmlFIrfb4OLoV5AGYVhz7tB563He+2DYtzWL3HEiMA0P6md3ghKcDJiDiXCZVCzg6yALPjLnUEP9IG58jMkqrCpzOMLqGYeF8iXz9AKMt1HvTsHPj/FTYZ8F1vHXnLqLlgv6kdUK449jJNU0dY3lhul6POPdR0MWin2Wo+GDlAN4Gqhw4V9KVpCmxmaiy0aSlLQLRAD44FapAKLA8+Wi6N60jm58twQNpNLSnChgBYi9xJ+9+l88w0Bkmp+97RRcEp+3LGgVjHa3FMokYvrDGVj0W/jqYj04lsNhapqtErrbRpInzfDrx7vuM4Co57pvtLod9gLg1bqXCuI4As2gbqz+jYziLAyeHlJAP0phlpc69oJ6zZhXrEogt4Z6hvMlCYXex7jg2lNS7/e5+TQql9ToGThj3u5bGLtGMiNXO6GVIAAFP02vUcAsZ9f5KS5r998ZjnsjZU4zWK7irHHnAIwo9KlOHIFaKhp701HMdIR9aFR+tTs1hL4x7UWWAWmBe0eP1CE1gHzSGjnKG3ru4yp4haukCMBmCKgP1Bm5qosZs/z8vT3NkpLGt0+oVR4oXjLv6TPD7VRap0gevU76h1JYO1AGQhPfZtLnhTCj7OBSlEXELABxeLGDweY6TaWouG5T3Mw3oWDS0Kosvicc2NTt5YTX5VTYA9M5wMBMZx5+Sdo/N6CCBvjOQAimsrsgiQO7Zd4Kzg2TQEa/DgHD5ICa9ihF9WsvuMgWkdpla0nZ5uN5vx8Vef8NWnDW+fNlJIBXiVIkQTdY1YwOhFaMjSGxBGVxJCLgW3+4Y3T3f88tt3eHu74+OnJ1y8x4NqWx5qQjDU2yQRXNQSBgAFynrAOoDQQi6opeLptmHP9EHLalSbtQvLRdlDBrgsAQ+XBdvzilYarpcEYyx640hL526jexTeGM6fu6iaqUt7+uoT9vuBp48Zl52Pglr0cLNU6nunYl3wZgzR82aCEipKQy0FtTTcnu8zinzbMokLGLsc4HbQRud5O7iUtxbH4hGMoEU3D0/rHWlRQnq6gSisaBGs4BLDDKxMKeJyXeGXBOMcnkvB821DzgeFqRZ4WOLUuKxLQvB0PB/mp0ugyW9yup+yyuJUNqnzTtEDvhGtVGQIetFdmU6a+Z5RcsG+ZTzdNvzax0/47199h/t24NjyJHzk4R1
XO2n/LeBVqUitwwYPnwLSmuD8vOEI4Vp2Xk4wn9OUC+gOo0rnhFSZ4eatRXQd0Ew5wKB2NV1Fp55KzrVAVxg71abOFA1e95QsNJrJ5e2LaYguDa4DXrRRBfdcw3JoxG5MdqnagJHF8uLL8NdRnJz+LAiU9k9SVNMz7JYr7nvGth1oOSs7Tma220COupCOfmRG5VgBAxMF7+3tiN5xR2akIxhg9Y6kMgBr8ni+H/CWz+lQ5m4FtWSlA9IaMhrRKOcocsaY0EdSAfWRIXjMFl9ICAnWqMny+wX8f/b4dBcpPVyfS0VWXVGwFot2XgWGXwJkIa23GqNptXRHLtpljB0CmX0NBkIatyXVNOr3tBiTEwvR6TCGkzClbKnWSKH1xuDimRZcGrDVhp1m1NRC1arBdgUQ8gGfeoU3wM17lKWgLAsSSPCwCMCRuQR93lC8x2Edys6IiBFA9zJQbjIVh/ARLxhrwhTN3hrKwSTNQ7vhXBuOxgnPwGIrDcHxAPSGRIfXMWDxHUlj5r0yCxNIX6fhZ8evvXnGfT/w5vk+83AEOBe1CuHsm2eyb60Miesdy2XR99lobMmIIeHEMw5Zo4vhkive/toTtvuONx8/4X7P2o2KWhTR4SEEPzvsQSO32tGTKAGNauCE9HxjE/DuvqEoyWLcrHQakfmestD10+sQujPrGnppDMRYwEFpvXqUGoNFAKv6omWJcCmiW4OjM4gy14ojZ7RSkZxGQljuP2jyWzhZWAOzCESJQTDcTY2DEn6wITk98hrgQdyrXuO9T7PZXhkhsR8FT3cal97uDLYb2hx6CApyFwSAE2BlcekyAYhzooTMiWmwv75m/9Vehurx1zFHj/2zMcNP7oySGMSP0e2PCaEqLZoT1RlFMcg5bjD+gkeMvIfqOFT1e45pyDpHy6MlIl2SNqznJOWCP78G7X6iL2bq71xw8JG7MoqNiYIIeFbkRvKGN6d4H8Mu1Bg4o24ivUG6BbrjOWY4LYmzEF0bREcUiaJfTcPWBntQ6qULjkhUpWSL7C16raiKuDituRSU44XonMJxfg5cabQuqJ4/LzmLFNxvnCJVW8dTKdjU+XhxDhICgjckTehXA38VY2eB6jCoyg7LVRe/vUN6hYUgapch6uxMjdS5bIa18zBSU7epNRmQknQWqeQcHmNEbjTytJXsrb1WUpdbQykFvVe0XoBW4ABU7+EF8DA4YoQHTW5N4cVUt4zqO6p1kKIZRirGYyS3U5+xPqGCcUrQZUAZa6rcp+6oojQq+gczqgoINYnQtwtcoBpr8RA9YheE1nmjOYvFAEGXtCQDNDzd7ni6H/jq22eKEDsPpzGNlEoq9WEteieb6dXDCmcd8l4mWw2Nc/O5G+pTIGwM4x7KUfD09ob7bcebj2+4KanBgLEYfTnhYQQ1llUroKl5EkKiTQ9MQlwZT/cdb583UncBUsK9h3V+duycyuWkqIsAZpjukmbchTZXMBbG6kJdYeAOwMcAFwKS9zBqceUyJzBeL5wYo+F06cbCvbNQkBxDaNYaA4l+fu5TI+dOtiq0yxapvIadg/Nj0h7kiWE7RFjvvh943g6Ug9erNy+kGQJa5zRHuO8FLCQT+hyHmx7a/Twge228FmehGlpGJu0SVpKvKXwjCdnblwm1ZzFrylIcBYrsRIxvMCHQ4XwR1NZrFL2XPwvWUAMWuC+Najs0rmtYow7oLyZDezZFA1Yce635d8ModHa66PT5Qs0MydSrjEXKnnFCk0UndLEJWqCknzls1I2yQXJKWrLeTZi+1IaQqbXLltfSsTneE63Ns3Ds9b0mUXhn8bAyHcE5q959HaUB3jDeZfFuZrH9eo9PdZFq4CL1nU5TR2u4hgD4gNeWxIQHMejeo/uK5PWGNxZHowjy3ZF1SVlQe0XrDa2xK1y9ZVpvCPjMahAdl8tdVK1vzpyV1ht3JKWo1oSsoi4dvjMp2McIZyxhkFzxdBTSNaUDnU4WaBXQqQYAurVzUd5KofjT0i1CAJgjw3ZBihav1bTTpIAlBrx+WPEQHKKxdIxv8+6azDjgxKtFd2PeWqwh4sOHB8QYESJZQ8E5PDw+4rpEXB6uSs+1WGolfl7qZJutvcJlAHdwCikVT2+f8fa+483TTScOgyVFnt0G2PM2dz1HWVBbxcO6ELa8rpDaqUfTTpZkiDpp4+P1UDhZ8fGbZ9y2A//9q0+4H0WtpdhJlqpTUiKbLwQGYQ5oQtRRf+xoSO2nq/i7bedCu9ISaY0dWASvNAZ8CR6Dtg29NmoTioOdQT24F6sz3diwOMHAGC6dLWh+7KRPZhW6ne74rTTsR0bJWYu2Cr0DJ7S9tikMXjz93NqYbMwgKpzWT4DuRjv/XGCGkcN8GGW2jp3LuaNgszg87iBzFTL5jKJi43ZUtKOi7pz0TBgu6PxqStK4PW94++4ZT29uePP2CcdRsd0ztlzoaIGBnmnDqL/yWzlcYgCMwavKoNG9tkl2uu2Z9xYEfuWE+ngURoeA0GOIHsu64HJJeLguuF7uMEZQW8WQm6XEKXe9Lrg+rLg+rHh4dWHD4ylsFxDG88EjLvTXc6qPm6sDMwgtlDykJWBdE0puuKyRhJSiSb+9Y7GUNHhr3zMKWL1DshbQHbC0Tli9NizBIzmHh5TOiB7vSVfvQgZfp3+mSYB31HyV0ZDsGbdtB2pj7hSAYLV4K0HEeYvLQtur11emF0CAey44SsWWCcMGR9FzfU+K/T9/fLK/pY8vfvGL+PZv/3Y8Pj7ic5/7HL7ne74Hv/RLv/Te39n3HT/wAz+Az3zmM3h4eMD3fd/34Stf+cp7f+e//bf/hu/+7u/G5XLB5z73OfzQD/0Qav1k2SLvP3su44Yo1bshUPUInkFmlxTxEBNeLQnXGLCq40BThtCtNjyVinel4qlUPJeKW6m4lYLnnPGcC24j12ZCgphU5DE9TZpuo6uD6Gg+GF0W7DKjMWosazm24/wKlqPw6hxW53HxAasPWD0vsNE3zZ8tMr//sEC5Bo/HGPAQPRbHvRE6/e1Iw8ZJ/AAwvM7mIW8IX4TgcV0XPFwueP3wgA8eH/D64QHX6wXrZUVcEmKKCCkipYQlsjAm7xGdIYmiM/ZESgUqnd+t0AMvGINggeQMdVcWgDT0XpELgyqPnJHLcIHm1FAzBcn1KKh7Rt4yjvuB7XZgvx3Y7wfynlEOkhtyqUqlVxZeI3x5VHoODkhzdHsvNULja37GcuqIxtTNyO5BN5cZZOl0kmKD0eY0UCr3VntpuOeKe+avW2XuUlbos/fBBtOcqtqoVemi5snD4cLMCYGf33lwzygLvN+QfC3daly7w3D0xeuflWZAxJjUdu+dQp12jI7gJXQuyq0ZRIZBiujacJ3Jv/O5aEPAsMHCg/G+43nLuB9Ky9f7cB7sxnxNIZ2enerYkYKfFkZjJ7WVgtvB77vnglzITBvx69Zb+OS1YERc1oTLGrEu+pUiloVfKUUENXgNCtn5QNguJur9QvKzQJkX79XZWsksVCOf7XSLp2PMEiPdRiIn9+QdoTP9iob6zNEA7QeLQ6
6dxgHen89ZWcEYk2/j+UBvRE48USeeSwz80vTqRX9+9EpAc5YiZo2aueg5uwaH5NVl3SgqMT7fSvj2kzy+oUnqX/7Lf4kf+IEfwLd/+7ej1oq//tf/Ov7oH/2j+MVf/EVcr1cAwBe+8AX8zM/8DH76p38ar1+/xl/6S38J3/u934t/+2//LQD6jH33d383vumbvgk/+7M/iy9/+cv403/6TyOEgL/5N//mN/J06NTgAq4JsD4gd8HqPS7LgjXRFiYJsNaGa+UbYsAP8ugdW614cxTcKw8L6ZU7oV5h0LEXuqlLF7yOCQYGyXkYq3BFay8uMZ1IVK3+8s6xInAKtwXDbjMaFf5Kn75b3js4MXDiAUSSFGLE6xjwGIIGylFx7pQ9N+yKrAFi9BDv4dfEEDXDJWdvHU0DxixOdqAohNN6m0XKOIcQgMVYfOgDHrrgtTKjvHP48LJgCUyzdbqj8+rI7kuhkW5r6genPmStw7eGRYBqDLIfOxGL60JXjdobntEhveDIO5wVxMNhPw4cR+SeyRoE8LCU1lGOguModAWo3G8NvH+kCB9ZU2HHHqMbiDQ4WxFc0TQVg9QEsBYhVjhn0DT25aU2ZjzOIWkuIQHpsEYQjMBZknoKGDtuQNGyeC73b3tGKR33o87rZ1XRb3JWnf2BpPoagLoocRaujlwzi2gduvV8RwzhOxqMWjVBVbbgYLu92JlKF7IKlV/cx+RoAciApGTu08bDwExd0KKZRINVNnzdDEYTxAV59HQ8tzDaaBTk/YCLPCR9d/qzVFR9FNxuG96+u+HX3t3w8fPGPVuHBkuyKRupsOP+6wKFr5jPZY3FsQJewyWPSh3Xcy7IvWGrlZOUM/hwz0gloXeBjw4ODukScX1c0VrHXgrWLTJdt3X0DqxLwmVJuD6uWC8LlpXFyg06vf4aF5otD7HtS+71gCENTvhvGDTXS8PjA5EEawwOx0k6iCBIhxsNq967waonX67oAHKnZrELBeEheFzWhBQ8SSZDWtFpEAucBCjbuybzsoX2YJN9WxMcANM6vJ6lAzIOkUnnlyXiIdGVp9SOO/jh7JkoiQEntdrK/+J0Px/fUJH6Z//sn733+5/4iZ/A5z73Ofz8z/88/uAf/IN4+/YtfvzHfxz/8B/+Q/zhP/yHAQD/4B/8A/yO3/E78HM/93P4ju/4Dvzzf/7P8Yu/+Iv4F//iX+Dzn/88fs/v+T34sR/7MfzVv/pX8SM/8iOISnH+JI/gPWJIsDGiCunowTmsPuCqEFXrAu8bggrpBMTAu7UoxsKXBgcDqQIYD3SDYGkKa6WhCXB0MgWj4uijII0ms8/pZFx/Ku41Y6/xojvVPRUV3IT4vGHM+EXdHIJROq3leH5xDqtzcIY/v4HF0+rkhtJgPB2IYQxCq7BiYZqZC+9au54bPHzYgfcJSVRjUEHn9b0DWweycI/nNSBuiWRlBWfRnZ2H93AHIknFoHcD25pOhzS/XAzwUQq4OIOrIyRlrEGKAbV37K3iri7XjMj2dMk26pys+61eNQqiNWz3Hdte2DHWBgODoJCbVabX6E4nDfYF1bo03pyyl8nQCioads7qwYGJ7XvLw3lNAevhYSFEAKShlAP3jTvEFKLuXwjNjpGl6b7oXqgpedoOdUMBciTcWD2TbsdyXERgWwdKpYaoMVfoGgIe1wXBMe11SQkpBFy1Q16bnz/3ksKcJKR31Mrr1YpMujEM5mdiHNAtgSSr54j0k6V5HBm9NcLCGjU+NGRdIVADGtpGT1NbLuoNKex7RY4FYQk0Pe3j8NYsqFzmrut5O7CVppE0hCENZO5pxvt6suwwO3dnSRgo2mDl/oJA0RqKMDMq5ILcKprwbhjO9yE6XK4LJzADHEfB9fGiBVOUURnweF2wXl4UKDumWIYozt2TEphkTL86wQz2IkDWqQ/MPxMRvH61IjhaLe2BJr4mF10NsKiLnEGPo+jULtj1Vzaf3KlbzwRmMRYtD6PghgaZzWaTU8do9Jw1wuiXhyXRM3FO9TJdeIL3nOqsgZuvsaO0OgM4W+cu2luD2vInOuf/f9pJvX37FgDw0UcfAQB+/ud/HqUU/JE/8kfm3/ntv/2341u+5VvwpS99Cd/xHd+BL33pS/hdv+t34fOf//z8O3/sj/0x/MW/+BfxH//jf8Tv/b2/92t+znEcOI5j/v7du3cASPuOepiJ5cHnlImXQoA1diqqIUAxGkZoLTwMQ8S8h++AsZV4vAUCWKSgB9eIxCD37Hyce5ATXD7JnvzLoxsnpPLCD0/a7MC9YQdzCZ7R9k7dn53DQ4wcqa2FbRWAPg8ZN1uHsQ2mNhjfYCtTUmGF70lpGCmlzLL52s9RtPBVGBxdcHTB3sjOgmH8uXNeTVqJPzdgXoSDDjtcPFoXuEa2mhvwC4DH4JAsGZODxeY8tT/GCNYQUFtDDoQyokIFzmBmPYku02ttM2b8vmfkSneDRTB94gzOYu9sh7OYOhhOW1DaOydiZy1yafCuolU3C9vLgLnoLJbgkILTz48iy9oa9kPQm1ckVQMKzFkgx1CSa59w35hgDIDmRZmFaleF0y1lsjJbhwfJEGsIOjUZZks5Pi/vLOa5D04Vw/JGOq2Luh2TE5spUbaHgAd8b7ySm+7/uhJHSi44VC9mIO951WWhJm5c+yOXLCo93IKTUFWtVasNrlEw3cHJriuhIWfCscwiIkw97uNBghw/qAu1U1Unx/dJFGZqdfg6mfhsGmUmA+ptg/Sk33fAmTF5QBL/bqmIS1RmKt09gvojhhjeg/LorPCCZYHZs77XuL7nJqFNgnMMCJUuuF6SxlxYBFAe0J1BzwXdqKi7czoeRsINHVUEe+vqtGGYYACQLWgtheF6fXVlSnarxJKu9kbSJ3192HGtkbT2Fgt3oW24ilhtruiXOkDmQVSprc2039Y7nLFo/zsmqZeP3jv+yl/5K/gDf+AP4Hf+zt8JAPjlX/5lxBjxwQcfvPd3P//5z+OXf/mX5995WaDGn48/+3qPL37xi/jRH/3Rr33yzmPxYWaXDJbQS8bMUZjpkwfF2TD8zQaDZIBsHeJRcDSNjagVyRl4CIJhV8u4cjenqNY7xCj9XCco/vwXFvwvdgEK6StDh1OA1AK0CiMdqxahz15WrIF478heSlYNToVeg9D9Fm1hDEypkz3mewei5+5b9wTjgLVaLMYScjx3cRYijmLU1vEmV9yOgqedkeQQUFN2qXjVF6W60vtt7HRqVQuaJmTc9Y4kXPC+Dg7XYJGsw+I8mqi3mshMAPVs9fDZyxUPMeGDy1UjriM+XBcq4ntHz9wf3O479pzx8dtnVd4XQj3O4dXDBQ+dJrdpEAYeBEsMyJUEg1G4BgW/1CGUbAgOaDWocz4ZXb1xt7g4B1mSusEDWy54+yy4Hzu2fcNxCKJnHPmaFiyRe70YPGIMahnFw2OvFLsS6lFxoyVMJ87CeLqkW37YnLhqh2kUx16DQ7+u+l4yTPCycBcZXopzx5fwM2+jsXE6iVgaNLOyEpKDI5zau9CUuFbsR8a75zu2/cCbp40x4
zrVRkdKP7ogo9CuCUASNynHHgamC46tzMyhuBISCmp9Bah84FDBdG7ImrLLs9bO7r3re1UtALVk8tZwwtcJdLxW7g31951ZSBxwT2bcaFyMcGoc+72omrW0JjqC1D4Fs8OVg0GNblp6QfeJQpyOoZGjWdZ7r7fhTk5JBJydzhs+BVweF4TE9+W4FOS9YLsmlFyxP99xf9qJJNw7i/l+nE2xMepTKOiqz0otMkcOUHIMr0PuBQm3OwCQjiMXWrdVhhh++LDiuiTGrFwWrMEjiCAfFMuXqmYEjZN0cwbSz0mehU99FZUp282QnPz6j//TReoHfuAH8B/+w3/Av/k3/+b/7Lf4xI8f/uEfxg/+4A/O37979w7f/M3fPC/2YTQ5DSdHkRC+EbkykKt0Df6CdtjOYtFo9OgcCQ/6b52hMDXa0QnStBPaxZ1EA7157AkljWlqFifhdTFHfO1WAO4aFscF5UNKupQM07LHAdO/q2in5K1F086G4WOEwkjYoIM0RIiJa3fpdA81bkYY7p/mQrtUdGOm5cxR1ThWBKU3OE03jepuMLQyRy5q6zTsjviCmyHUlZQM4u3Q8TBAsCopYFdSSumkTC8+IHjGOARPbY8TYX4OWOTfPd+xHQc+frphOwq2XGEMaffL/7e9t421bavPwp/xOudaa599DrdwudAWpKbaYFuiaPHGmJiU8JLGVOVDbYihprERL0201Q8mWoyJqdZEEw3it6IfrNoP2EhqE4QCqVKqiFFbQ4qh0ioXCtxzzt57rTnH2+//4fmNsdYBernkT7nnyBpkc+/dr3PNNeb4vT0vuaBqFhq9gTUeu5lBgARX7qFTpnytFIMtJcFbQa3advW0MCdYhbPFYAw2waNONC5MKaCWhNTZCGNvaZXfg8/Jg8rOlSoU9PfH2eFaHLyD91YdVPletcpZTNWKyhsCZYjwMoiBeyjqvnYdZq5CqA0YCgJ6B3Qf40HAgz5PECJTu8LGzZ5KJjeHFXev9jz4mwxIdjfDZMHLG9E7CIb/McAZtRKdWHNDDWpaqZVSh7ozez+KA3f4ddd9a6ZrzOlrgWWFZATWsLqvQhueon+zByxpomrhHa7etQCPFQ4TQ4xnplvGWMuGo4WhI7G28KsGRtEb0hTcgWZQKzsctdRRAPZAKDADPGKMHe+HC0x2J1WA6C3svGagMVCaZPV5ZVUOTXisolRJYiba9nQ1ZiyjTd+Erg2stAngOSTyo4xRHpQSy70ngGsKnjP5UlGMOve2NsA+D1rZqP2QPgcQaGX/u6g48ba3vQ3vec978KEPfQjf8i3fMj7/xBNPIKWEu3fvPlBNfeYzn8ETTzwxvudXfuVXHvh9Hf3Xv+eL1zRNmKbpSy++cxeAwQcwveWm7bC1FOxTwr3DigrAe48ddPDsPS4iN/0cvOrm6ZY3wGRJnmUQUTIk9GEcw8+OpDp6yPTv4VLpFCHXqPSesR420VoOG2PEYxdbTDEgBmaYVEEuOEijuriW7pN1tGtXgzfTBKZUNN9gC2dU4hi4+sNo3Imsi5I5rQGscPNLKqi2IjVVNlf0U210iO322VbJfksqVF5YE/IJGbLfj63OAoh2c3BWEHSAb0CY+JIL7u4PvCeg/1X0DlOkvYT3DpOxsFWQ9guReTnjt+9f42ZZ8dv3rrFmAiLmQPTTJgaUGCCRQBPjODdJSghNOk/Iyt0i6i6Rj5MTcl4xRw9TK6YQMMc4fIMcuCdCCPAQbLxFzQlSEtbkUEWOoILg1QmXQIZOZk0K1Yald1I/xGMIiNFjniKmQMRU0DkSlK/WWhveWQ4GW+94KDp75MZ1/osjB8o4qheQD3fSfhrVE8bQ3nRZHnN0iV0XzoeeuXeN3757pRyxBd0KfVLDzI4O67MM9L8p2ktoMuaNVPGoqLmgJouSymiVdRpEFzz11gGSIH2uCkXC9kqoNTSxqFYQhyeUGbJbB3WOTsq36jJCRoNT6EmBAjsYYHq7iqsTVQED7wA0wDQgIalKS4UrtOqxPQo10g2MNWjZkQpv86imoGeCAcEk0tjR6ATfEDycp91Fjh5TqpjniJQyjDVYcgbWFUur2OeCqzWNezcHP4SGYZyqThxRl9LPo95qhBlQ9irAWituUsbVge9zAztFm3nCvPWsDEsc72fKlVV67c69ZsjHjffR8T4bHTk4Y1CeW4z66oKUiOBHf/RH8e53vxsf+MAH8IpXvOKBr7/61a9GCAHve9/78KY3vQkA8PGPfxyf+tSn8OSTTwIAnnzySfydv/N38NnPfhaPP/44AOC9730vLi8v8cpXvvKruRxKb3g/Mrg+6GP3nez/pBydfc5oxiJC+SDgBpj0MN04i2Kpl1cbFSwW4VwlmN60w7EXLoAYzSA7Oqr1DKG/4eyTH3JBKhWHVLBXnkduhIVugsPFZkN4/GbmYRW8ou5UNdg6iG1A4MPi50nlVzgXGUrYhhL/VeHe0AOMKKNx2R14BXhtnxgQMisO8zYiQbBKRdaNZwy9oEygi2kFToIZvbGGu64GQmcMrAUWWBxAa4/oHAf3lkPdm1zwzGFBaYJqBHeUUBuUJgABUXGC0V5ccsYXrvc4rAnXh3QUwAwY730/EDvc3ToL54TvbWkojfIxS05IueD+9RWWtGK/LJhvCKNPaxoOuBcTEVEXE4U4gzUQ72EBXG42yv3wHD47h4t5C+8CfXlKGgdC1Ws1hpnopCKgnEcGwn0V5OAdP899QB+ynKmZZ4Sto66U3+WdvKNXUVcJ7yhOY5TI3vctmOA9oI2nRM5eTZVaFGRRkFLGfkm4OSTcHDIWbXPRobZCrKFqQ9cfQq/We2dBFZjYSTzOF6t6cmkV5ZxDiAHTXLHbbnBrtyIn6tStKGqrzj+Ra9MczCAIEByQalXRYzPktvY5n5julWEfYo1FdEZh3I7zyl5RahcDloleawLUAro2AzmxJbkeVMtxzeggG/pIsZrupN7qHE//elSfhyGIQSrbgtZZ2CZDBsp6naN5kg2cU+Hb7NFEcL0siMsKcQYFgqUUUl6ESaF1vQJiRe7MEfkIfX3eKwoUTCAp72YQWkPMFVhpOXO9ZsxrwmZJ2G0mBMtr8UpQJgm70xV6Sc37GJzFZoooABad/RE4YVHKEWfwbOurClJPPfUU/sW/+Bf4uZ/7Ody6dWvMkG7fvo3NZoPbt2/jh3/4h/FjP/ZjeOyxx3B5eYkf/dEfxZNPPok/+kf/KADgda97HV75ylfiz/25P4ef+qmfwtNPP42/8Tf+Bp566qkvWy0923LODwRfH0JaCERZ6K01VUOn/Tl1wezIXjtDnnMnHfoZlWRBRZKGbA2KJRLulNPAXolWVHKC3tMKqwfJeqI1digFSykKC9W2kPdjZhE06DrvOShXVJCxFhhGfg4+0IPHOIcTqjx6/scZCjR9YkumiQzV82MuZ4akivUWvnlMc8QsglU46+hSRSQiBthIszRYMyw9OsxVtI3loe0YY1CMRbUO1Vq0PtdrrJyysK1YpOnvwsi+AHNUAqmEcjNIUelgUe0/QSdxPmizYnp7COQuNXNi
3lf74Vuw5oT9smBZV1wf9sjZY/Ue0XmkSDWQzjXbeAsLZqcOgFgSd5vMMGDwttYihgCBRWnAQfk3uTV0sEiXYoonSgaTZ2vZa+Cx+r6IEU0KRAEXtOj2PUiBnBbfEYHQzP/ko8vcOGsfbMP07xmtL6PzVIzWV1es7yK6uXa/IA2ieiYdCVXa3jHH9jfnoR1KchQg7hQI/kyXI/LwgdygzUw48xTYvs25Hsnz0kAvLkGXwyKPkQF57cP6k2se7Ucd7ju9j7SSgQIa9J6MSpP3rnMMW8OQamLSULAsabQGxXse9N6NFmfxZfDtxr23ZpxDxpmT7JF/0oq+F4Yq8YCFVeDPtAmDe2WshViaMbIdevxVTlu33aoFoOybBZVVrDEwFhA5gkusM4i16pyQ50vWhHTV2XMwGGapp/5uPTgdi3X+3il4ZBHs5mlwEb01yO53YSb1zne+EwDwJ/7En3jg8z/90z+NH/qhHwIA/MN/+A9hrcWb3vQmrOuK17/+9fgn/+SfjO91zuE973kP3vrWt+LJJ5/EbrfDW97yFvztv/23v5pLAcChZvABUDXgvhFbH9LpQ70WtrGCtf2cgIUM6aNqgAtvUb2BOMPKp2YcSkJpAalG3J5m9mKd10z15EK0HzucyDVrXQr/9uf2e9ykhPtLGnOBKfBh3MaIGCe4ECDeAyrVz0cbMN6TPxO8GhXSxNA7tZGudQTI2vvCSpIzxkCqA1wDxNAGoDXYRvFSI2wBidEBcQzwFzNeUCsOpdK8T3v5k6pYzN5RZTl4FJ1hJc0SRRqCtp02M6uPOEfytqKHnTz/fi5wucALEKYDrA6ZqTXoYb0nwKAV3F9ItFxT0lYCM2VrHXbTNKSSLjZk0tO8UuVhah3BrxQmKof9in1KuL8/4GZdCcC4usaaVxyWBSlERYxqkCqEfKNWbDQrdo6QXVaOHrc2Abc2W4ihNUaqgpuV7dJ7+0Vnbg3B0UTvBSFg9h4vUBsSr6CBXgESVMJgUysRk4fSsOSGQ65DUzJq5eQGGtVA9PC2CuwxEEBRWg5HIA9LOp6sFkejThidq0NOAl1vC1oY4+AdJZiiYzu8292LCLJvnNsYijhHzbijc1RJMIATtqelQ9arqG2Fx2xZTadV0X8wyGvF/hDgrCdQp7ISbqJ0DiMQWKz5KNGTShtAlY7M7ZU2TSM5M7SquJDXjLxkpCVpq82MqkYUXNCJqOuS6dF0SFiWjOubBdD53E4r4SnoXNHao7q+tSNAhejhJw8/RQQYWM9kgweUoFkZZ4worNjo+zTFgO12xsXFFtvdjCUX+BBUQEA0SThBpwqQUoFzGct+wbQRttPBM8tYYAqqd+iiEpI9igj2C6WvqvA8yxqkQvRwKzUrYQ2kHiHn0qqSgklKvrQzZm19F72HEMHy3BDoX3277yuteZ7xjne8A+94xzt+x+95+ctfjp//+Z//av70l11Ns4amH7Vn81pBpVKxFBWfBYEQTjPXLiJqhVYTkwU2zqB6i+YcDCpq7Q/1ca7UhDBhaFult1Bwci2ibZ1eQV2njOs14zplZm/KKeBDHOjl4xy6Xw2nxQ0GR7dSsRZQr5ppil+CFCoKbTci8KJim5o5dx0vKQYA1aONk2Ppb1UzzFnEyWNjgFsADqmMAXlUwU0PQ/hwo3pDqRW5tGG7vokBUwzYbSImnctMc6QJXLCjb94twzdTHHOLTejKGgaAoIpBBsWBk/bOYUgQ1ZEwkVXOYTcxuGyiHxXKuEdyFBdtrY7DpitSdGdZa/i7Ju8xq8pHdF5N9TBaSAKqcXfKg/cewatFBoAsdVgtEJbP1pRTi5TeDgo6d3LOMbkw/H5p1FP0+v41QFW2DTpcXHplaE4AOq0RKCGAMTr7AjPl8Zy0470w1sD7bvlBoMvI5rXqdvoaWe17TJXHfUftbYJXtGSBgUFroI2GMZjHe+HGr+V+bCpgW4fDca9enHfwsWGaA+ZNxLpM2MwTmgBLqpqs88TuBo0889iy8pZCqk0Iaz/1musKHN0pVgDVIay0G1kT4howb5q2H81of6I/ba1rGLYBAimlqTo5UKw96ig2QbXk4nXggHVauen7aVyFq/z6sV3Gf/Js4f3qJFiamar6u5Jz17Vgu5lQE1uinatFPpa+l6XBpIK4JCZXGjCMVl/Rci+56Nn2F485etRWkZIm5f3471VvoLKGc1aDU3+P9dvAM9IrLH8OHtkwgHVcyXNZj7Z2nyhHB1AzNPXQaYJUC9ZScJMy7dvBHrD3ND1jCaySQkKyaXMWNnhI8XCmIdej+GbPQHlgqDgnMAaRfTWBHlAMUDc54+6yUmJpzdiFAGsd5hCxUWsFHzxM8BBHMVs2j9nndQYIjbMVp0KoMYaBFhRR4l5VgnBriACtzxWRJEJl7GYErRjY0JTcJ7RECIYupFPA5s4ONjqY6CnPo+2V3jaomknCmJEkVPXnQRNsJ4IXLraEXm+miM3EOZu3RCqiNbgYEFvD5W6LjlbbTmrWZ8wY5mYYZEM1e0pgWUyRWX9Xcw7OYROIpOvVWFDFgeOg+MRsUbk4q/oilcJKM3hWttsYcTnPmEPAdoo6ZzBYUkZpbVTmMAabecJuY6lSEDzbRkVUvJgBSs1+NcgqYdPy4Q06wE/C+7kURaKJYPIYbRkoTBm96rKq32iA3sZpzQKoIzhbEYgTUvIARReyy9BnjTE6dG/gPlxvAOcnTSsmT4WR7aYwMCsxd+Mow+WtQQ4ewWdYa5Eqr2GneyHqXKW3WputNFMsFU3h/+jJH73aMe8mbA4zSq7Y7TgGWFMd1vTcI2qpU4rOxxS4Iq7XkCPIGDki9LoihohByhXLWrDfL5Q1ih6b3Ub5Sn64HWAkAw8GqP7RFAiVnAGEbdhToTej7dTuEjD0Bm2FCw3GtuMsBxj+bjj5ewBG27Bzlm5fbCFVsL+hXUrJhWocIH+u6O/JqaBoMphzgXf9XjBBia5vLyJFjQM2s0drFYs3uu34A9Tpc/AxwE8FLjid28mA8Wu3mr5WFrAg+tSCTt5dju25rEc6SKWUYcUi68N3nQrW2rBUusvmWnEvF2QYGO8x7zbYzhPmzcQ5A0CuUi2YAW03GHgrSHXCJnpsrMPsLC5Ul847Nxw8eyVFIp5OXi2ziiyCver/Xa0qRgpWGremCXe2lG6a5wk+RiUOEnptFTqO1mAyIdgCwAWqCfjokTWbq8kgScN118hrDTvvEEz3tiLCjc1NnZMpeQ/CassGj2k7IW4jLm5v4acAN/dgAW0b8SktKw/2zvuYtPopuUBKGzyvjcJVJ83AacDGliwJw1S0sD4ggBp+sycvpOjVWoVyE9nGQBaMweVM5N42dpABEXW9OhlGfo4zsAbAa8ANjgECQuJorfQgc5ZE2McuLnA5T3j89iWmGDCFCBjet3v7BTdrIoesFsAY3NpucbsKqrG4Zbs9QzcrDNjWo/r3pOi70Mm1+rqMkcHjYWDmf1tDkIbzDpsdECdqw9FGhkE6aLAitLcTgw0grFot+GzUppp1SpLNlc7Euykg54pNLpimql0CO4K6EyI
aLzcTYAwutjNSLnAAghhsdH5WS8UaA7ZTGdVmR81JExxSQSoGxTdMlQoJYZNhg2Ow0la9txYIQJgD5u2Ekiu2uxm1AdOBQJdaGyadD7YG5FyxtoZcEhOV7uumRGdjzLCB78mmCMEXh1QAa3B9syLEgDgHlEy0Ye93mg5kgMBkHuqMp2Z8VH0+OP+T0YFowqDS6QXOWlSj8zUwuYZWIjTbZNvcahJ8WlEdYfx1jAAeu3PBzoIAh5sF65JQ18KRh2LMqwiDXm4wJqNWgsYG0MwQSFFK0wRYOC9eFyzrgpQWRAtAwgg+PlCTsIlgtxYUn1F9GomXV+AHFSfYgTgcVqy54OZA8Nhh/V0ATjxsq9SK4tpoq3SS5L7U8SCuTWgXoCzuGAlOsLUChWKwplUEbY95C4iZkKqDMyDPx1KzL9huGmcH9h9QtFK3YTDdnE0GAi7pwNEaWnbMXWIoEuFF47NTkzceRNIMrEBnR0TgmT6wNMyEyRehA2lVBniwBkYUnm41Cxdmnv0hZRdEYcfBUQF64kPq50DDNM0gmanzB7PCyLfbCWnNqiBA4deW6G0UncWk8kTkCrFaqPVo/12kAyXsaK304W2DDn57O83SNoICvAbbKWIOHrsYGHQcHZfpaNszVENUpO6VTvAeUkumtxvInQvOYg4Ru2nCxWaDW9vNQFquKhK71DbgvmspepH0fNrkgrmRSW8UAh6DxxypgN5EW6a+mx920EGHaeuL7h/2+P4QQQVIR1KJjNmKBVtePePmj3XrD6CJGe2pnDnLOSQqwjvLOdVRfso8oHAO3XvOULViN0UE35CjpyWM0I7FAqhGwSvWIisXrPdzWhMkqTzAO3gAtDavqi3ZRY6hlaNzbti4hxgQYxlw8VIqogITirVIQjJqrSS0t8aWF2HYTjlGKoQrHAsY8N4XbdelTDoChZjlhNBrjoGqtQFkspZ71it/0FZocDnCvHug6hVDt8YxTe1B9Pn1kXvJlzrumVhFAvYEUfcq9PcZHRtspkht0cstgjVYnMVqiMqTVCnPxh8j2rgKnGG7rWiyaoxRpRWrava8nzln5JxQckYNQQnv+v4pyCWExmcd/P1UiOktTQWvFapNLOuKJVUK+vYE4TmsRzpIpVLgTMHSGtYqFIvNFVdrRiqFGYRRNv5mxu3dFpdzRHRueNWUTH5B0AfR2IDdPCuiKasALAETTm3cjVPb757tAIA+2IQbU+tvnwpu1KXUWxKH72xn3NlscGueGDAV2m06es87GJX0MSKQE+6FVwtxOAvTqD5gPCWhEshvaJlDWrEOE4BgGPT6XEVEAK9BexMxbydsLjfY3Npi2qppW6SJ2zFIQVE72rQywMWtGdaA1UIMKGtB3iftcYsGAj7IXVqlNCIdD6VhXxpSETjpFamlA6plWyhai+IaNnqod7jwxjvcUZHbnRqrue6JZAAj7Xj4K5EVADyYGW+miFobtsHjEDxao4PoHANub7d4/LFL3N5u8NjtW4pgsiiHBSIJ+ya4rg1XmgQZGMAlOEVkOu+HkOcUvZKBLecetQ20kwEDR651qAxY/d7ZWjht90XvMQWLbfTorr60OOGrqplq4vurG+ScUfrM0zlsrUUw7viAC1BbRVb19zURJehEKZWtAbUpXL8bSDKxMQaYrcG0iYqQBMEEahcvlTPfyTlsomdbUXgIHXKh9p7CrydvsSkeZfKIS4D1DillTPpa/MRA5YJlpr6ZsNtOkNqwbMnNCYb7YUkZ1gjWkpEqsOQCZEWwarCb48T28zxTKs1a5KqQEmH7OReryhbtaClfSDI2YGvRx8COhGHVaoxgmyaODpwZgdZ12oDy4QYvC6KGkJQqohC1hXGF8/JJ51aeFVdXd7d9/uM0UdG3n0kKK9spBmzmyErqsOL+M9c4LBn3rg5oa4Fk9ns7abk/mzSH5rgkN7oErGsmRaMW3L854Hp/wNV+PzhxWStZYwx8pHWRlIaaAlr01AfUa66Nwrz3b/a4Piz43P0rrJkAoAqLNX8DVFK91VYrM6IBUS5U2wUEwZGMu1NdvAADo+oMrRRAuBE7edQ5P9ontRoGg9bgYR+E7fZrMMwK5SQI9EF9h6x3YVJyYSi2SW8blUdBoSmhUYSPtoyMMegEDmMA8RocdS5hgGPGGTyq+jr1CxVAfxdbRp3LAW1fhOiHhUCI2iu3R8UOnMzRe/9aR8+sPoLHZha0bUVxGWuTMQynvlqvVVQloAMYFFhS9X5loUMyvX+1d28EEcDsG1yzaJaD+jk4TM4g6AyiQ65tR670IdwA+ZhRmFht+0RVcd+VAuP4Xs8x4HK3wW6zwTzPCDHoPIKwf1a6arXuHFyfERgzYPhJDx7nNdN1nOcUPdhEmEmXQufnUjijgMGAk8MLfD22JoNm6j1I+ZOWVa6kSdxbVjopL4ku0s6NKpLzVP5EP9iCJYqVc0ZNhnQvssK2OJr4nJLkj3u+Ggz9xlNYe9CEhEhDtr2SqopwHx15RCmr8HPp5N6KVtogOhrD66fsEPdbDI5zLfEQEaTiEZxDcpZzzNbQqqov6GzTGDWm1GBvLVU6yKQ4GhACCsYoFS1XVCXODoddoy1wBZ2U3EZbi0LOrNBqazCZ1i89YbLKn+ztPxpCcs/6TKWbmApEWCFa8G+KAoDE9tJUjh/mqI84hQCZWUmv+4TWhElSYVVrtZU6edpnUNNQ0dCQIRPWqQZrrshFkAurr9ydjBVhy7GgBtVAzmCVNuDuAgwKyX5NuF5W3N8fyFmrgBiHXL8BKqlRxmrfc82ZQSplHuDW4sJZbLzHRQiYjUUQUD6oFNSSlTfQ9bc8QmDGBAikOg52S1EEkpbfIujEij6EPT2QH8ygmIHMIWA3RcwhIHo6uObW0DJlVYwlsS4aIvm8N6NVZx3/KV1hwBgAaiAWPEJtmKZIEq/OmXqQ63JNNjD4emtJ4vWW3KdJORcKFTYdQSZdzoa3urdH+lC0t7MsDFAF2TuYKshLQpEy2jo9YwU6uk4UgKKBSh+gtQlmIZQ5WiCAoIAqDkHNGqOzmL3DZOk/RZ8uwq+Pjq5H5+EOsTbKp3EGCJ68jYuZWmxTDBBA1aw32G03BLPEyHYFBNY72OqUwEhOW0eSdgBJ0lYzFHUVvKKavFUoOQ3k2PpqELH00xLVrwtEdwZjhnQSoeH88FaVQ1RVoQlQ9yT5fn7fddwWeo8FT0qDPgNWB/DOEAE3OwsvRNx1uL5pHc7NfyfcXDTfYVuw868o/SajzdiDVE8EoNybpu20TgPhHNBzDmJJuI2qPkGfMHqGGWch2qZiR9UOJf6snCntRSJV3bulcL8Mbh0TgW4rPwWPW5jgHPmVVYDSMFrRvdqHCAOUzcjeDEdddhYMPEhB8YF8wbJy3lrysVVYVCPSj8Cs1YU+461xv4tChr22vahRKEC1A53rVIbM9l90zL14z40mtMHoe2ywTCtqbQzqyiUUcA/NwWNW4Fjxx4QxOLqPl6IycmtBLo0BqhrkCvVkq8NXj6MPgm+aoQtFT2RzaWgguvpmTbh/OODuzQ31BGHhXEBtvw
t+Ug/d0gO1yBFNt5aMXDJm5xCMxS3vcMsZXAAIOVNJXN1OKV3PkjXGaZBpjWVd3YNNs5aeSxqgunUDgAENbo3lfVGNNcrWeNgZg5OxVbmfCuAqFZLkFJ1orCV5sVRsW0OcIjMpA0jh3zHK2wheFRkM4CePjTV4AQjhLinD1UqAh3rY+NnDb+d+2o3Jb4ePAhhGaWYtsEXRf54PC3SITg8nwlxb5owhBgczB5qtJQI3IIKa+aBCKwCgkzU5Q+ow2ZxU4DLz4Nt4i1tz4AHaM3bLnw86HxxKy9IgRVDNMcuHdGwrq1ADVqKWcZ2Gcp6GcrEGnekdM10ef1oFq7TMNAWINbhzueP99B6HRPUCbxUcEvzR6ruPlXqCoOrSuVSkXGChAA4j2LYIGEEIbCXDsz3Iw66gtYrDWkcACaFXeBaHlHGTMgEd+wU31wfMmhB5H8jJawIrnPvsl3VkwlZnSMFSYsgDDPZiYKRxFqoVtdU2pdOsviilg3MXHvZQtONQzQZbXE3RlCnnkX37auGLGyK1eS3IKzlKfvKwuke7bcbYxzEgTFUdgJnMVQEuSgGsxaJOx6VmzgFbw5ISA501uNzNiCbg1nYaVVbvoFxsadzpraVJpT5foWkSMQW1dyc6uN/DvCRYSw8ntgj5Pvf2v6/KAzPsEHStTaftwKZJHClyWtHaXkwK28GeFYvRVrhAYBsVKkziXqmqLJ9Wlfuq5NoREKSVqCWBPFhW0IOyAwxSbmmsog5rRakGTRysC4BxqLAoVZHCOqt0zkKCp1ZnLUNOq6uer7liqQVLJbiF80Alnj8oKfg7rkc6SHUpIODIYep6YQbqhGuNGgwKLbjFQMAgQ88hZiqdq9RZ8qI30zzbBfCpG33devL3uzsol5IHHU3CO7l0bQ1Lpeip0VkT20oecBymVggKlNzpeVDMUeAVOWWshfecDdkmqNZAskJyuySKU3i0t8zEdSjPjcnXUftps5YRDKXqaSsdCddQEm3opcq4V85ZNE+UXXFW1SW0BaRBqreerB52VofPp3Yf0bIdNHnKAjkdwhKgoqghe5L5nmS/X7o5cALzNSMDNUoK6gCXPkM0zTJzbF01m0yAfkgGANs5ahVoEAJnns50W5XT2dgJL0dPbFYVnAmhNYh3yNENtew+iDeOc85qGlsthTbxtVAJPE6Nh7b341pLbzc2Gkwac3QbTrXCNBJjDymNIBVZUpA8PlqkAKRRR85o1ayt5i7ejJEQqD2LKjuIkuW7JBkgx++TTrSHku5P2r7tVHCWrT92mjvFQ2WtPDk5Pvjx8yziG+YUUWpDDB61FqwKwKnAUMvoUmRNmBDwvWKy5B2RmDEcwRXdBof8H60otb1ljYetDXWi9mJdi1Z9Dca00QIOOrsRBTlZw7tjBPDGsvIYycC4cSf7F8e9pCMAQvWpbm4V/cORR0XJBMfk0lQ26tgSjHpeEOWo3RBAeXYKzwcg9UQIufFCrKH+JGAGB1SgZ4giaaV1usdRtLmePEud42j6XMxRXPm5rEc6SFnL3jv1QpiRGG2TTM5gdgZbZxAh8LUAGSQsum7p4cfBEnSoSvZ0Gxlg93uxJ2Hf6CGPftBBkYY6MEWjR9RFCKjeYxP4Q92HKdeGfSXybynMqAh2sIB1gM1YRV1ElxX7nLGUOlpsty93uL2jfP4mMPubHNUQWvYoa+KmcBY+RrgpEq3nHUyvpAxUEsmiVsFyyMBS0Gy/NwbBqztrpblayWUIRwZP1JVXOwxKPDErbeGEqdeHWgC8CKIA89QwTwG1NVzdCIf5a8KSaEctrWEbA3ZzUGa8wogVXBC8gkxOB2b8F31/mKaZE4J0h3iLBot9E9xNmaZ6a0bwDrs5U33aGsxbJVNbekj54GGcw61dxZ1Mqa1u+9AP+M4li2OGxAOeVVTGYV1wsyRIrVp9GaRKPTNjAeuZNZMQ3rBfV1zfLHjm7hUOC8V+N/OMzRxxsdvQfttYhBjoKLxmCOxQs09KuK6ZavWfu3cfSyaoaBfIYRLcwgVYKUlrqDAQKXCipPdoRzU1PJl0n6+54OqwYE2Uj5qcxVarSkDbfyIg9aBqQuDY6oUSi6ugdLWHsCLOHmgBbqblifdULREAm6yvNwQCRUrFZo5jPx7WRDi8ugBkY/S5bGMMME8FITrsNjN2m3nwJqc5kioR/Ejejq7MMpI65zWw6/zVGArEGmvY7quAq23oILINWPRR0Ha7AYJWTrW1o+q969QJBRzZo6ZiR/8CTERZfgEwVN5Ia8b+sGJ/SLjaJyxrxiHTeLR7YzHJM1qp97kSv66zicG/5DzNAGIJGLOe5+0Ya2gC6giggAHyyoqSjtkkyjeAgKJpwu5iBxHAGYcpRLT2DTCT6qreVNolsbP5hiYNs3OYnGZ2QiSVs0SRMRuiFhaDQj8YqG8nmhm2eoRc8rA9zZCPop3dhqFL1QOspLyzbJtYbU+AA9s+zWLg0mTb0oisKxWUVJBqxReuD7heV+xTViJrRDPcOM664ZlkNUtpfXZlqVBhIgNTlf66qioLaGVTmX2jsKNMteb+QAGAoKaCnAtKpkmZtRbzFBGnSjFYURXsyuDknMK/9Z6RjCgjljjLg7wEB+8MkhGF0BckYzHpQe2Cw2YKcPZEQNUexTcf0Aw7SUONwqhtJwYr4spIg1iHaiyyALlRE28pBUXhw0upmLW6bf1913bhFA1ht/GYIY45W+tt4MHFBoRorlwqD40147AmRR6C+nIiaEZbk0qVgP79w8ogelctMmpt2KSKCwVoeIX339pujpqFwopzs5lYHUwBGcyMxdLa4ZArWksoIthMmYCh5mh02AyMFRqCGsCfzFxFA3Lr8kAqPHtYM1LOmLxDjR4XmFXRpaPsMEBH3ZIk+KODgdSGmjLyYpEOgQonTktQY+CDRWse84au3d5b5GSpv7gaTMuKNbtBM3CWnQtWYh0OfUTfdqWSKXpE5ShOcxiiqdYdNfM6JL+bGfZKUixOxGAdrCIGoYRYB4EtlB5rlfu0z6WMpVZoB1h11Xo7kufOtzt+dNHZvlqtMOVYgXWF/JyLghP44Qy1NKFt6AbOT7vEWL8/3ZG6u20D4+XCWUL/o/K8usW8oFdixzN2zRQkTpmqNERYRrZmrULwYRGcRy5fB2fe53v1w9lbi2BJ4uuH1cY7zM6BRMmGVNnfbQYI/OFjpm2MeqywOSzakmmqizS0y4xRrxaglwrsyx9tCsYQ2XQILxsXFRiIouHCaRQUARXr7EFKGkpuOKSMZ673uLdfcL2scN5hjlEVKwKHwfMEKIigZzfNe/XODrCBeoBFZCiAaxU/INbWtQHX7i0CA8AZHr7rYUVOBTlTUcB7i7YjPypGusOiiWaMwrakNSNIZXWglQ66MKzSYlCAiCGIpOSiIBZWQtMcsbXUEYvBj9Zal6s5YnLBNhXfpNGuHEHKGEiraI3VajMGBUfdwaVU+NZgrMFaCh9iyNC66+2WqG0/aFuD7Ss5ehUlRZVWEjJFRDk4zOIPKWGfGOjFMCBmAZqBwpGtEq+J/DwsGdf7Fc9cMUiVU
rFJrLy8d7h9a4fgHG7vNgMAgMYW5E7N6Sa1A8mtAc6iCMEWqVaKfuaMqUaE3vI1BqhgktP6jO2YDIhAW0usfveHFdfLikNKmL1DLXHYMvSn5OjzxgDjvR18MQui6UrKsNZgnajybbvCtiN4IACYG9uiOTiUxA9nDaa9R1jVosTqoes9A6U9it02VfWAzlKm6DHPQau1MEAS/cK7vY3RGe4IVLonoO+Z824kYYYYbCYqKQOV+62fFX2258LJQKa3U+2xfcYGwBH8RENVNTo1QM32gXZ9nwONOZ8GKrFGA7S+dyJ87zWg9fNTDINrT9b7dNZqi7PP1fuZwU7MEQRVReWlcsayJpTMDpF3DhurHZ0Yhwq6hcWanttQ6pEOUk0PqC52eXuKyM4heYfZc6OsSnKtItgYIIqgCZ1eHQCUMjIGL6qMrsAI07SvPjIgJTrKUXW92y+c6hp2AdoBI7fUdCvGwIsqIBj6SyVRKLbRdpnOa3LhAPRmSeODGm/AfslYVopc9ofbdgKiNFiVWbKbiOYcqjG4d7PHzWHBves9UqEsCX26IrbzTOWLztESlgGHlFFywb17e6xrwprKUHjebQ/apqD8kNV23LyJFKO9NQ9SaLs6oLSEpC2amqksHi2lXdaUYR2QVwb7+2uGCx7bUmnfEQLiHI8EXM3s+V+imaQC/vVwEWcf0KIjwsk8EMh8CAixwSe2iQQMGGL5O6xjO8Pq0LqDajqJW0TQytEjaZWGWggN71YrWWchuQmraDWgY6AEcmOgLK3BNgbGrkVZRBWoS8WiDsiwGTEFpEy7jNl7bC49LjcT1t2Wwp6GoJ3oaOkdHOHbWRriNGGaZ5SSqVO4neDmCBMDcu1zXVZkpleh/d7ovLXWk8Bc1bk35QFh305Up3DeYYoBFyJMlkBez8U84WImIds7i1oK1gPleoo0hDmigdJIYYpUYgmeyUoiijUdEpYlIUtFAWXQDmvCmjKV662BdwGbSQH0hjBt7+ivRLBOQXY0CPSi8HTvGJx0e7ngxvvfg3WHkYvOq6A7rM8em2GlXK1FNYZIvj7zcw3BmTH/su44e3O9UtG/3zs2fe8e553Q6yzHElWvgbD/SqCXdPKBnlf6PqZKL7dlSaqcQWCYcwyoSa13gnYv5uBxsZ1xuZ3VXgZYM/lUy5pRcsa6rHjm3hVurvY47FcETRR2my02kUnzvjY90zJqaQ+MAp5tPdJBqjxQvdA11TgO+oIFYESzVbramtrQHK3NjXKgyFPi4RCNQWQzGlaIdrLH1Gn83WOQqjz0VP7c9FaZZjtdSNIqos9oRlnBAFZAyHU5QdiQx6F/B8x8alPHW9uHlnIkrGrvuCqqDvp7jHew3uvwWLBXnsLn711hSeRRbDYTtpsNqqpQA1CwB39PLQ0pVRyWhIMGRs7aLWqtw1aCLQOHzRxgPGWbnPb2BRieM6eaZ8CxogrOjnslMEMFnGRsZrH9YdYbjSMPX6sac2z4Udn9pE013rj+851o7IaKAbly9mhB0OcArh9SViH6Fl24F3qdpglgGRyGEj6O7UJj7AA7BB36W2cVMt1GBmydhW9agZwmRsrlOQZm3S86KwrWwkHnHU1Jszp/8D37AnCx2fB6jUXKGd5ZbDYzHVdDQENHrbbjvdO/OXAVJwmZwRH9x2vStreoKLHhQbURDBfo3Typnh/b1NBqszU+n9A9E9ZMbbjYPZYwKuRadDYjTStPrSJU8LjPeSiMG0bVTXX7LkdFkIF1hXu0eNjmiXLUCoVVjGr39fsuGK1dEnhP1DJOkQ86C0UnADcMTlLveFitKHuF1r3YrLYCTY9eYx339OnYoc+GugpO/zFjjgnzSRE4EuzSmFS0JrzUpl5xtQ2zU2uYSG6Cx6wBSkSwpjyuLaeEdUm42i+4OaxYl4RtCKwaLWjeGTyKUYV+y8SindyuZ1uPdJA6pIRWgVzIgrZoiJZaY81Qov+6FKLomqBGj7kaSKKuXyoVa6JKwmQtbgWPC+9xSx/uCAz4bS9MBYJaOShOhW0yOK8lcz8QWYGFkRn50TZpummr98y0LImsvczmR4NLBacuujhBxhGpo9YLhfIqVURbmJbs+BBgvEfJGYec8bl7V/jtL9zDbz7927ja71Frw8V2i9uXF3jRYy8Ykkw7f3wg90LOyfWeldxhyYBU8rO0tcogLQjB4/Jig8cBwDvc1mBlncV6SEirU1kWMvmtcn82MSBNEdspYs0VplS2VFVst1kzvKCAHpKOS8b/Tj+n/zx5vnt1gn6wqzwVD5tJ23kWu92M7W7GZkcbjRgCbOjgGj8OB1E5H7QGUTUbresg4DU3Y+CniNk53AYQ5ohN4vymHxwpJVzf7HH/asK2TOOA9MFjs52xSwW73YzaBC4XbNQypR/yVhSgYK0esOQ/DV6TgLwt5XddXmyx5oJSqNO3nSK84/t0WLLO0Aq5Rzr877OM3hHvluTeOWxCQGvkz/S2nuiBHDxRpZvNjKZfmyJV5Z0h7YFQaQ16Boi1IuYC473ykxymixlO3Yr77NMFyzbp9R4ZDWspOKQVa0pIpSB6zqhvbTbj8N/NBIugCdKScdWAmyXDB4+LJtgBMN4RsOOJA+/AKu2VoeWKVvlREsFEJPJqwszsFNYb+BJQYICQIKhopSvaE6kbJo9JwUHoQVH3Uau9fQ1Nyk52uX7OKErOeb630xSwloIQLJpYlEqfsp4IUktPNLidAIkApEMGRJCajD28ibSUubObsZsjdnOAFUFOGVeHdaBHl8OCtCbcu3+FkjJaKbA7tqIdqHrjjEWCIAsRhKVW5G8EntTdmwOiJ6y3C1Q6xzeEUh9EOJUOC4duAJXruFkTDssC0wRbb+BKhI/U6AuajjjrBuvbGItqgJzZUihVYJRnYRwDStWMqqMMDY7zIvKsjLYP9cNZHSaScwKcVEs4ZtTdLmGKan8RPIJXySQNjsZZPdw1S+8qHCljSfwnHW0XanuBUksXqu4N4GSACtUhwwhEvffd0CGqooaTDcGz9bA7rJiXCaVURfsp8s9q79uyhcRRHGdraaq4taEhWiqVKiHBjwcLwCBlDlLASAtxkvFjzA9GgDJHS5eewXYjtu0UdYahM4rJ487tHW7f2mJ7MSN4Paht1+M7DuNrozROz3aNJghG5KgSYi1iAFwT2OgxLwlrSrg5LESOlqqAiIIYHXLeqKoFyd5e0ZybOVL8M3hsYqBIctBZk6o19PenC6labU+dovL4bDCwVPWYisGPNpI1rMhLcSrEq1w5Dbps8XUejnJlFG256Yg+Y2iHUtjGcyEQjan3kVbzFaU0BsXG3+X0edhqNR0PCdNaEFLRqoK8vubsQBeGXE6I6BrUvIOt9QjQ0OrYq1r+UPIXIFcBrIULBU2zfT8FbC4mtsG6xYgwOKGKeku1AVSopQ2rFxGB6YokzqEK98SapkFWDvHoaEvelc7ejBJeTsYG5OodZ2GiiD4R2qUYa+CihxdgKhVTKti0hu1m0muwGqwJdAKIpowK41+CjkK6vmjje2uUHzgHova8PfrnpUx+5/2VGpBL
KliWBTlnLPuVyhZa1feZY5fcMk3dsg2/7k8Q08+2HukgdW9Z4C01+Cg9Qn03Y0jSLMJAVUUJcKB8Tmskmu3XFTeHA63WvcXUKiahbJFoUKLDpn+g5ZIL+/BNKFMzvGd6xtXYMrGmZ7PsKjc9+IETxJMB8tibvY13cqiaPkx1w1CwW49H1fszwCD7UstPZfq197wq2qbbrx/WhFwKjLWY51UN5NhT94EZer+ffEaPma7g1FSyqtMm5WGMNbg8rNguCTlXhKAqy4pecs6iOYumrTEDZvnbKWDdTMjKaLcaRLwGBUCDFIgYe6Dvp7+n90jNyY0dNhZQFJJmqx3K30Q4xA8OwVtsNgF3Lne4dbHBdjdzPjiAMsxcR2PL0uphDLYV9GB07mCUZxQ16G8gmCePdfWwECxrwnXK2B8WLAuTg9Ya5nnCPIuKCHOeNEePOkeUWmklokmKNBVWLZqRisCocoX3bvR4rFXxXWchvtf6XbrJaabONhfnTVURiietPBUkLUWtSmo9KrU7Epq7d1mrDcXwGfF6SMYYRst3LVQuv15WFR+titw0OpQ3mNaMvGaUpAhbrT4suDdDbQiJ7rRh4rzKe4fiHVxxo23rNDh5R56QNVT7liqsbqyFzYSVhzlgSpFCqb2dxt4gWgbENDRFijYR1firw1oFwmTHegsbA5MEb7FJGdlZZAO16lCEsQZVF5xyqEBouxCJasQMwEZPHIGjYC2sgQseQYBaI+ZUUKpgo6aoQYNUcFSaEBEU2xCDIy/ReySoYo+eiUsqRNN6P2b+/RyAdlaWUnHv+oD9SmTnYVlQS0HNGRt/VPn3VtHNipg2Ta2RtEUd7PHZerb1SAep33jmLrzxtE4GsI0RF3OEeG7QDlAIYPvj0giiHv9JGhZpKC0DrWLKAouMVh2SsYOz1Dy5TrUSol1gqJbchN5HMIimw4ct4dzdw7mxydR7+U0EpnoGEQFWY7DA4KoQikzdPQaqmyVhyQUVRCHGGAl0iHEc4NaagU6EIVigGos10ehxv5aBvCpVYJ3HbrdDqg0pZ8QYEeJEAIFnSygEkjtbaZBC+SgnbKMaz0y2aCVKDS8STltr2NuEq+sFUwi4unsDSQWuVLQ1w9ZKMEtz3Lj6HjpnxkZ24NC2Cg3SgmEZxMz7CMu1OifpcwNoFmpGfDJjiN3BEj2Iex3iGuewbZEPogVi9NhuJ7zgBRfYbCdc7DbacidQoPPl2PNSIiUsJHpA3/tmDUxpqMagNIHTNmhHnU0OWJ3BcliQk8GaCg7LglIK1pxx/+aA2gTbmZnw/ZsDId43B6BRKuoiBszBIRiDlitykyF2azXrPaJRtV3tukJ57xP1iZ7OP7USNQYQLxCxQ/4KYAVVtKV3WBPu3ixYc8ZhXckDUnHVTsG41nbmWhJubTe43RqFTpxDrbRsuL9f8bn7e6y54JDzUIgXQxWJzW6mB5IqOXQ/JaOvz08OU4m4uNzisccuUavg/tUB+/2Cm5sFXtu6uSqZWCyMobdXk8zWOyjEbGsD9g5hToibhDu50E3YOCWuV5TGe2wUCScg+KJzvVrrzWRtvWr7fJoIMS8pIy+U2nLGEAwSPVvJOq+DcLZqGmADD3CrlIs+exszMcOkwk+eHRRD3T1jLFopys+qWrEQci+KADTWYp4KrDNY1oybxSDXosorTSkcKmvVGg4n1h+LSs9dXdO25pAy1nWFkYZogK33uD1PuD1FbLxnF6kUSLNqlGlwextRYbCZTvrxz7Ie6SB1ta6wphLRZoiOK8Is1+pmjtbAiiCIYJKGKAQaTGiYTYOKrCBKg28CWypgHVn3XiDNoFWDbAoaDH2QlJU9W/IgKqCWGvpg946UZiLSTpS5AYi1KNYgwWARwT539NfxgM2VbUpANbxc1xk7InwG+kdfa9W/sWTKkVwfEi3Sc9HWjMN2M3MelzOCD5jnCVMMKq5rxyC1dgJvYf/diJA3ZSxcA2qzyNoGEK0SS61IqWBZE5abFR7UiatKAqaESp8Z8UlvOuiOhRUiwEzXap9xKBLUCqv/a6ajr3orxI5s09rT98DoHIXyOlbMENHlL3CE3noqbm+3EZvdjHmOCBMfDYEAmVlga6IlmahAq4VtFj0fdI0zB+ccVQa0HeYMk6VqDYo9VidNjsK0hzXBe4eb/cL5qrW4f33AkkjS9J0rZiy8YUVBnyAGhq4k37tFp6oXHf7Nzx8TBHNyRhjBsWoyhhUhqC/XK+huOJiVJJxUdLQ1FWxuXZmEAq9NqnLiHKYpwHta6iRVkV9OtDZLa5j0906VagfDBVdlorTvfGyBe9rvbLczbl0k3LncIThSFWqm0HHrrYnGJFDQYKtR+LRKIzXKAXUzyI7W7c+iaMuvBwn0xKd3PcYsGUPMlaMpC+sE8xxQVEEFTb8W2O7rXCtth7DqMKJVOo4cqd56Px2+apUsjuCkEDxqrLzXll0Lr+AHZ6mH6JpFbTIQxBwzNCzZaxtfVBFCvdgEQ8FfpAwlk6LGoT1BddD2tHdMomzXrCTnVIxB83R7mJTHKSY+p3P+kQ5Sv72/gTVutBw22wnNkYG/CWyrhOLhaoFvDbdaha8CMRWQAmMqZicAKqIUhGYQRLN9S45Vz7hyBVYB7hdgbYSN37IBWydwjTJBwag/iwYro7tYlDjXlKvSjKHwYhPcq4IvpMqBpQIjrKcYaVLhWe+orhHUdBHQ4Ty6SgUfmrVxRnH/kLBfC+7eLNinhFIbXDCYYsTjU8Stix0zKhjc2m3w2O0L7OaIyVM0M6eCw2HB9f6Am5sD8pqYBBgqiAucDsh1RtEHsLnisF9x7Sye+fwVyiHB5qIyLBRzDUEQZoyDoOYCOM54JsojA63ACg+8lLKa9zXt43sNRIos1Kq5Bx/neh1xlKhCFSV1AyY4iAimRj6XtRTajRPJovN2YkU56X0WQQl8EFtp4+A01Q41bFsqjK0IeoLUILCVmajX+SQarVVsI6LT6gHZFdT3qcDYhGeuDrDXB0AEz9wjXWBNBbe2VBhxhl2B7vwqOII2+lynS1FZo8TqHpMfgJfoQdyrw9NA1v9PZ6OQPlEVhcYDqQGH0uhWXDhfk9aAVpFzYuJ4EAIZcoYxBjFGiHW4WRKul4Trld2Cw0o4fAoOl7kiVgUYgIGqZaqSt9JgjQNcTyw8thczXvhNtzGFAFTg+vqAe3dvcO/eNQ5LwtX1yv1jK6BtJpF67LWDSR4DMEVSeyAyitQV2CPFoV8XjsGJOovE7fnMdjVag1MLHh8cWg2oOaBkti+7aK4PnNedoG7QZd1IXzkCKnoUPH0XLbW7IM0hqkxT2yqYI5dBDxkanSLwgZYbzlkcVspBGQvMKcGH3N9uBjYI9okmhcFaJqKKRq2Vz4QFZ84XU8StacKtaUI0gKkVh5yRhFqPcTMjzhO2cUaYIqb4DVBJ9ZlN8HSD3c0Tbm1nXO622Ho62q55hckCkyuiVNhW0TIQSsJcMywqYJr6Rhk4WARD1QBvlccvwNoabqrgbqpYmgYIX9FcxVwqvPf05ukdlX6B2qenJho3aDVGM0q
6pabUkACIswjwtFu2Bh7sKxtDCOvorfcBsSdIooGouf1Kq5JnbhYc1ox7N8swXNwaqg9sNxNu7Ta8gYb+ShebmRbpEKQ1Y1lW3OxX7A8ZB/2dJNIKpkiS5CZ6pOyRXR2ooVobcqZz783NAgtgEz3mSdFlG1WYd3YEqbxm+ENCs4btzSZImbDgrv3lWjeLY6/eKs22Z5kuOAqAOj74/f6XonylbmQnAtv7+cboPbRUG/AOQdGIpwPD3kITETSvvJgGlFyP2m6A/p1uO1HG32saoNHpDiJUR1EPoNracDCeJyK9ap/95KLzzzoOLms6TNmezIy63JMgZwMjgqzByVYdXvcKrhckx3oWY7Z3Ulr1OWSHVxuA1ymC7axuxZW6eE0McmnHw9Na3qNGZKk5HLA5zJhrhQ+Rr0nf26qoOKfQ814d91mgNIoaW2eRpowAgYMbFWLwpD5ImfCCW1v6p1UQZVYb7uOgc3shVN8cof0wBiaowvoUMM1BOVmcEzkFp0AaqrOjaqKojN4f6QkXP2ptsGqS2QWanT6nFFWmq7VVII7rs8xjJoH+JvVkoX+202U6/aHfcIOjTqcPuo8NKTqqcwRYGfPYbg8zTVo91YptieRnWbod83nTvytArUKHAb22bmzovIMF3QkuNtMwJDVCQMnNmnFQ4vjGGGytxUaOijzPZT3SQapv5uAdpsggdTFNuJgnDVINcfEQqZBq4KXBVIFIhS8ZU8twzJ0GyMFC6NJrCEPvlVRqDUsVXKWCRSgf5EuFqw1rbdiItnpwEqD6P3ugaoKGOpBSpaggZG7MioWHpBcH57hpg3OAIUGQLZ9jv9s4ouVqE6y1YZ8y9mvG1Z5B6uaw0sfHQKG/Ftt5wjSFodrQh97RkuRYcsa6ZhwO1P9aEmVWThRZFNILxFNoK9rIKFMqWBZmx+uaEaJHtBZxO3FIHP1ol6QlwXiH0hrCfkWuFWbRlhr0UBjtlGNW31shXa09RA8XqE4x2lnd48cd3V+bNJ0tGEWEWRKFXedBHVs8vSpxToEk2tfv720FCdqMQTKM8qoGlc51oXK+Vhoiitpz2MxRM1tKzkRVWC81U+SzB4E+7zBmaBYOwEbr7VYGz1R47yZPJFuHwhOuaY5ZuW7OXomxzDwNUji551CQhUUD0aUCtoGWTM3KdhwIwlqnNitArgX7VXCzLGgimI1VNNzRXv1YGXR1BXsE7TRKhFlnEdbCFplW0F3SZ4r0UrrYzbRoyhXX1wHrSlh169wt8WjyRfJD3iH0IKUwbt9bcUqzMCAYRbq8l6L8HnjMe1VV25hRMYkmmAIOEE+B2VZVcPaLZJc6V5Fv10kS0TsyDQ/w8Pg9RnMMM4BJ3qm2oCnoyidozLw7P1DApCPUytcf/UDPVp2zldIgTZPFPi87OQOcs/CN6upzcNhO1BKdvAMSk7JlzbgpBas0SAhwIfAZNDIQg19pPdJB6vFbl9jGGbemiN0U8a23b+PObotv2syYLGBrwRo8aiuo1dBJtFW0vAK1wNUCK5S7ZJaKIRPCTJVAhGYMDk1wUxvul4IMgwaLCyHgAQD7w+a4qUSaIlrYAjG9ZSInem9VLUOEV2AsMHuLzUR+Dp1gKEJaasV2ctjOARv1gbLBY2kVayq4e3PA/ZsFhyXj7vUBay7YL0kDsEWTBucMtpuI25dbTDHwsBNRO3K2VfLasOwTg1Ti3KE0fRCErxuwaveuqt84ZpGlFORkCVXtJozWwESP+WKDsAmYthP6Dk1rwWa/Iu4imgHiFbN0UaLkFAlhnuaghoOEaBvNTCnE2R/2U5LjsQJyjQ/tCBzAyGSNpf2CijxzIK4HGyHmLI35ewWwGJlwM5ypFWXv7/crci5Y1jJmhpwvsK1JvT+2ejYm4kXO4s6tLWptY97oncPeL1g9bVeKIigvNjM2U7eYOOpFltZwsyakVJBSwaQaazlvsZkCtjFiCkxuQm8n2xOQCY4tw44a6yi9ru4P8LWQ9sAgu03kawXncFgzpuhHuy/yl2FNy1B+X1JRNw+CEYIicXtlN0f+vrnTKyyjXE0V682CVir76NIgmwg/aQDV5M8I26gOynFUk0O2p9mOtEbgHbCdwhCTDXNEnCJuP3YLt+9scXl7h812QpyZ9EAPbuMpfFxzg2Qq0uMkWegJzCmfyejMqAvDknxP7h806TGnLVrX91lvs+rN79WszobGe9QzvbGOe58BEyiZ51AxFcbRH6sKkZqHntQekr4/DZN3ShKnWzG/r4w9YZ2BhcXOOoRI6aXJO2yCw2O7CRfOYjaqWgFVVRFBblDllUYEaVHZueewHukgdTlPuJhm3J4nbEPArRix9Q7RsGoC9Cw0vd3LdhFU9sgCQ6q+D2Q7X2H0hDs4QSuqprMEmCMH5YHstGeframVeXugPB8tA/2cAxB0uGm8U+dMHjRVBNHx4W5mcHop0QISeJdUdKOtJNyueQw2jxqBRwXnEB3mOWKeKDOEShvwLvPSW1jteDqNLO8otHn8OG0RAcd+/QNLf0dXXfBqINfbTL21t7tcAWtQckZTRNc8cQ43z2FYfHQFgM6uJwCDFWM1dVwj44s5+mIB6DT3/v6acWB/mdfSK+DRhenv4THgdfmjfr2t9mqPe6oosGFd8whSVV+zd04H2orKM1RRb4GoqN1MvlkuFZOqJUgPTlpldVHPJWUclozs+/CaxOPOV4EAxutlgcLGxzJAK7XT6h/HbB66vzttIFiKGM/RYztHVtKamJmmSFARpOCw5ow1pwH/hpAnEywwe7pFwxoNUMzCgwJ4oICMkgpgALc45OjHoQ6tXPrz1QOrQBF2lrJM0OeZ1Sor1jl6TFNA3EyIU8BuEzHPgR0HBcX0CqUHKt43PScUFdmrW7GUOzsVPj6CV7jjOoCn/8+O/Wwe+N6xD3sGYUEREGuUZ6kJUA98Gqx6i3kATk4VXkRg2nH2VlrDkjgP3C+Js18RzMrz8tYCgfexiCIYjdAU3BgYB1hHQFo3Upxj5AwWUPECPqvOOThDk8S+Bx6UlHr29UgHqZdcXODOZoc784RtIPRxcg6+VVZItcA2Rf/hGBh6u8jqjexvtDnJRI4ZNGVp1HcAxjo4GBhL/kXoTH82bnlhXYmgVGiDdwwju95fbQyS0VpcKAHXavDYTBFOW2ApW9QKlNorMT6EuTW0UvHMzQHX+xW/ffcK+zVRHl97ytBWg1d01TxHIqFubbCZIwyAmgrSAchrPqq5S29TOHjfaMlhu7irU1Hc49n9gOeNMUc4/klbpUNpXVC9Px0Wu8gWXZgYuHb7LaY50B21VMQYaKUQw4iNHZLMthqrmbQmmKQ8G81QRwtwCkPCiWUI752cxpMedDsnpTWtr7nGga2HQCsMTN32vJQyDszOhQM4X0u54P71QR1jmxonHltMXpFQBjzcwzyheI8Ao6KdBd55OEfPq9qgFt+sUO6rRcP+sCI4VijeWBXy5IEYnAXEAuLGU99EHaRbGzMQC8LVR+Du77M53iZvDeV+ggN2E7ZzwHYT9SBs6j5A08bDuuJmWZFSpvitzjOit8
[... base64-encoded PNG image data elided ...]",
+      "text/plain": [
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "plt.imshow(img_list[0].squeeze().permute(1, 2, 0))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 26,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       ""
+      ]
+     },
+     "execution_count": 26,
+     "metadata": {},
+     "output_type": "execute_result"
+    },
+    {
+     "data": {
+      "image/png": "[... base64-encoded PNG image data elided ...]
+64UqqB4vDo155tWXMh8ngecd3EAuUvcOeG7rG+sMPCwCB8Hcmh82Y9O5NgVdoiStIvobNCB0UPcoxCjm1gMr3HQMvtZj0cQal6JIWSGfC6l73wR1ZMt1+QAKwhY7NfKfKZlt1Tlq3QOLl8eBSt4u+dU4umM5piLFK1jXi966TP1cH2BTkVoK5sZjUsfr0fu615uDxpkArPCIJ4RbNQHpi8Z5ZVu9UOKhiwUxMXOlN9wCia8lQbdGa+m6p2g4241wbLvCwg6+sexmMEtRhw12tKFfgEdp1w18YQ4Qqtw2FJqi13oyt+m3pHEYwWgDcl2zDKbQQZLyVjwAtAatSlZQyPXJHDFwKWG3ceiqfzWo2P9ADjBm/Ogl7VVNp4Y4MzqvWvznUd6G73FACQbC/erWev/ni4C86zyvOuL3mdt8sYkYAgXF/QnPKdcmlOFYPUAVgvR1AE2wbJm8zwCSeczzaACz4SjcVZ2GpdxRZKrE13l7OgIx3LRW7aEnd6errPI1HY73qQiToXh7w3nLvrAc9kU533zegPBWSE6M5oqAiJ/Z8AtYAx9bGA3APlSYNUDTUNDDCXUnqYfBBUqmH7ppCVP/dBI2DKl/y1N9j5y9RUbdJdg7469zQobcTeRXTi2ni4nLLw8ssAFW25sH1OAiXlFF9jSIAOt7lUJqOEqoyRjw5nAbS4wMZgjKMwBkAdZLyDaJmzcLDl28hhCyY+RhHYwJ/6btHSalDqNWFbGhB2HjGRNocxdBmbFXBMix6RTgaoqkvyafL8AFRqdLsn7w5KT/10dqzbe4WHa2/wtojL0AIclYaMjnd5bsZYbgGTy9PBIdqIo8O5G/jzDR2McqA4qi0jr5LvfzK0EpnrpYzu/PRIxICBHQxvY/oLVemcO5A9LTunydNUiEyo5tt9j/OSun8KWjoPtqSdzz3WV6MsV+nzxwJIQRyYYJuCSpzTEA4e2Lit2aZUGv++MhXJ2DFkTTKWrYHs6qH5fIfNFVktFvZnPRw61+LCLmQsSBFU167BqpkXn5YeKV7eQYn8d+9w6Xydb5FtMlNpFVlGFewErJSVgZKleuTuLmVeG7eFHqAlg5ZORV7vnmRMWDNY785bK4eTD95X0DL5RimYo3GWehBfjQnuwIjK2h7R0j6i/moJpkuvu0Tn9RZQeTXGV37Ilrp3uvzmubSn3E+Ji3smAb4cut2/NRI0ubw6fbpdk7S0FYu98uAJrzxtwNhsQyfs0o6GMT+D5+J5ApIDzXA9tVdfeEo8FnO0/ioNwBZh9jbJvmUGQsIBiNQv+1PVD2gHe68qeu69L6SVdBTZGI/IpDn7F+VJg1TxYt1IMgj4Hj+o3hI7KiV7sXkZzkwCGX+VtKz5IxfG6e+UEdT3P1F6MCKwo+1Mw7SJIHuXk/5CENeEroNRV2zu++qjCZvkOTHPpvBAXTGIDjFQUgMs8mDXXNWauIvfVQuTPbXA/OfAiZWQUxvdy/a+tVcrFc+vMpuv0o3Jq/uKIi9BnEY/A1mF5w9xHFOveF0uMY6iy+ERlVicESmgkaQWs0XnHJyYvRtQZVdWd9tvXfZL2QTwDAIVqA4AV8VsXbs1SrLKkcqjC4+0bOcilCWjv2Ub2O5vaNpAt7XcAbxKWabuyuIQSTmIzxPiFdZ4BoHA9oIjFYQTqDABZVzofX6grOgv73mo/XTiEcxbOuMpw8e1+6RByosby4yictWMIh9865HUFlSUShP9RWhTVk/3OWhhDd6dCX9EHWSbhY0/N9SUxajc+ue/z/tc/qoWTcW5MPDIXp0wl/vm82Nx6oSeldZrA2cc5T4Jpcui+mvJ3FI+1Per+YDaP+oL/x26s9+899dfO7AckBlLe8ucZOnSuW8B6s60IcAcEN8RAsNedKqxqi3H8kA0efGQ3Cmhp4lOpfkMWT//2bnF4osKH2lg8nI5RzzmNK1oPMfwwQUDXE+kzzP9HOWiD3vExsZ1XR4OVasz+Gfve5pTc86PgKXQoLoWHd3PtfLSsgl3uFvPNZEze11qH4o/yc5K7Sh02nt9x9JRZ9fW5/1WAJ4yXvd6JJbObqHoQvSMgdzwo3OFTxyktAhEuPgU5dC1dLR0mZ/CJ7eTPXGPyAz2OVVXgAfpNUVk8cAYlIjBje/NG+i67Y9/KxwhgeIT2dXoz4E+5ln9PLiU8bskNxuYnBZKxMQwRUi1n3a109P5JChLq3cDrOasOBOvbLjNQYm3v+RibTor+YCvjgVeg1NNvBBhH/hwiBxMjGiWJxWT2HivknPJJFWNf0C+ToS47RE0G51cOCRnOpqF8nRrGftSXztwQxcH+8WFG2Hs01jFixxNFpVCY08XlhRiFd6ih6mbnRxrgyKn6GcDqG60Gag0WVl0f3gazfvoYEV64dHMVMWYtumujPVaeE97nZyR6KpkNM46DQeeXao9iorr4hk1kzGPxrc7zXHwY2OAZ0du2ak6UleG0HXmcUD1xEEqX9u+HCZJIzAGxF51jIsBTEV115RXt6Tgx3Jx+xzF6zGtmNNC9lVXpu98KXd3sc6TxDcdyEM0UQGqG520CdGXAF7vHwl8j+yUjov40YH0k0krgHiZIOTCKEa/8gWSiCW+FFUEz0r18JQb0+BdTOtS76VmDwGUyYXPcY6xohubSxoKqC3Tr4sQulVOhsRbial+YJgnviza2jdwrIUUk6rQCp6rO+mCseHs1qPZyWI8y1j48nelMeHquF6WGwK/7bJyi2cdWOb8HlkRvSZwL764z1J3yciI32TZX69OzmPhAPOG5Fmdd+Wzyacmvex4+n6IHCErfP7Uru3OkI2jzrVgBvc0yLZLiIDoj2xHIHjI41oeNm0X9pUf9v34uBROuD1yHoRetAvbKTFmrVR+5ZXUUYZLTzj+Hm1tz2ldGY1zedog5SteUuLhwuse7hrcYQxu7y9xwQXKAEQkSxtCemrvGLKSlio9l7ROOVgZOXFpBZsKCBf9LZFG60Qv0kSAACU8zz5v2d2kk3uZbL0QsWRocQQcrBYBboEWuDfgnb6tApgntY+bEyYX59k30INhKtfR8l9zblxmRAaGKKasvqws3WiKelGcr+zXBJvEXPF7BAW+0s+fuYvNbhNYw+/xLyfRCcOcbXsUweDlzo7voODPARX6kaBUhPmGyAo7MX5LExy3lZ72FON3LNVmNGEdDN3mVPrFWNBYa/SXQJ4++40UT8Sn0IlYQATkRs/I8anqNDF1rK23eDysnuGOCzsCQzBiIklzgZZhVziqk469TmCTTo4Kr9jF/WvsyE9JSCon/ejUBtnORwZRAJ46SPXogVFmuSWRQvBUSed+KH0ov6ftEFeX9f0na+jCgaYEXfBJajI9o3n/Zjl7hwnc0Pp+aOrWD1fpqS7oTlvvcvK31pOBaQJU8N6kVrxOshCZ/jS+4AxQO91avnbhV7IUGS1407edgpCIqcCwye6pUJluIZox50iciZDkidcpFXSY8DqnmqpPdyNSeQfxz/GidE+Ald+TVjJkT4E2DIZFSVuNXM0cX/Fys+xZB889ZXSvZXx7FMUOYzmmz156is95
UR2WW3LAhph6EGxNe8E+A9sUrwWK2CZsRVSKgYk51zy3jwHLQ8yDQwBd6UGoYMz1eIdnG2LHmUIlfWqjLzaXpbEldDqxM9hUnJN+kiZSivxyO88GVE8bpO5n7ES8e2iyMirmvU+dGPa8QAU2M2RT4OmGOZY3NCCNyWXY1+1eVSxmmGUTSrg3IelFLllILTntBtFLzUFnw5yp42iQ+1crSg/o5GXFgcKWvHc6HiFcqqmUQ2wOh5REBKD0XhqS5BkRU2injyToCmx4mOOZmAOPyi2pysuAruXhEmGNYsjdWkyBuV4iWei4xRy7phiGtp+c5o4bnlbyBRxQhMPVwyMeuq1P7PwcDHOsDC3P1BHVBEoFpNTGsoFl0RTZzxWOcFdE8hUo97PKanEU8bAg+kDruf8/pUJmIMCoeCe666N9znuFiEIstbtesDntVTuAYAB3DoKrgTsZsWp5TMEUQKdAdWJMpL0RX0nIdkTKGDAtBVGBBMaH+s88LJUWiE4rIz5cbbrhkSj1pEFqGjAcDYQIvWVBMrdbvI0UXjZOa0moCd+VUSOrUBYxmMAsxdfwpkQAHRR5kHuzzS2d3qpctHw34D285ujH88cLjJHbDg3rb1yvO2AyubfE14GGro996cI2N+KQbVRv1gFYa4WmuEW2j0a51UdjGP28aay8fUlDByxHRBA7B7iQ+APPUl3HcAPUN+/1mmNsUm6V2kyW0TJ/MPfznhhX2Z2U7Kt7tdraTmNWon9NeqMvh+glGOOyF5Vb78M5A52v91n3sp64vfLykgYnsvS7DXgJDw/lhp9zWQiUku/d+jtB678CXKqxDViZsihtkCM5XJ7ENqmd0GnP18F+C7vBs5ZJ4+ZjK3LxD4/FhQPbI/cjW3ww3SGn7JNH0DGOHwsgpTox72e+TIzzuRB7+dsKkUXzmv6qBC3/kedvRumw7qIYQV427QC1HrI10IEN1joogFOMtCLn2cBAY6ueUpZO3Gi6kWm7knaTtXfcWhxQgSVTP8g0AuvdRbO1DpjR8tz87V0OoqPkwadxrPeJo31+7Ea5gxCle24D07lfkHQ0fEVWGBYgUoEZ6cRtO1gFEFkTIo7k2S8j1mWm2OnYuDXtt9q94vcQ6d1p8VWJPj7ez8t5UQKovnXXoh/xvLxjSr83vwdVGTnp4TKvx9JXRXZEigxk0Sp/B8cnFjXg0L/bivVwZEFX1pF2gvzD5XItzPEUn4PWojGUfkthjiFQGeRrzwVYds90J9QdmI25BVsvO8o8Zp2Krhz0aF2u/KW0lQ7GWiMA0EsZHyhPGqSmRS2+/f36BCnU+m/IgNyt1Vke7Syj5XlgNmT2BHjsKJ5RRe4JpjSIJvrquyPbqptJq9WQILUErepZVHsRxawfkSkzIUEpXuN+ig2Pp4rWjge+VL81QzQ/s3dJbYZ3/XgNvy7NG+cqjylOJ6Pw4Nnb3CqK1Vkreh8Q6BwLyOaM9wVddroMuv0XYNwWbZQOuPFenY2UyfTfNI0avbo7HOaoxx8ejQqrIT8Z8LQt5AAyeTzofI8vsbffCoogBRnY7k8SaPn5Ho7tt2q2w86fst6W+q++7OUZHP+tSi1E+rGhdKCAHO/1xkVgLzpd38cQKMZKydvqyCGA2l6B/RGDS3BqjtRVR5vPxz7ckXexonNk5BTOTgGth8uTBqncMn8ZjRW6IjxfwJjhXlm88pteA6D7gLon64DBwpZLiKsHxN5GmWvRKqK3DOoVQK020+gXnSOA9WvZbpQUDzLlx9EcC9lxElna9Tg5s1pvCD08K+G5DbVngC4cvdIoj0o1Bx6RBVnPAlDe9klRVcGv3GDnheVk6y6RV9NzAvUNisP75c2RjYWyVWPn19iWxUEGCifjwXJUzxOP2hj3Z49OKdslZdrayy/bTh50RZP0dv/u5ShQMhtKY96dqzrfmQLR+19SYRflFkae6L/lPCXBt0H6QEX8r8wfkoHVdmZO6p2ArxTtda6jc+dcPo42rNuESOUhgSkcKFRn52MDpNZChWlMmIJcshlaZcwfWKkaVXvzugJT80l/8vS0W+TebvxHAxjANKHu4rL2W47DvRzEhHsCFNV+o+E83JWSrmGgIqVNg4YU2JZCi9K+30wl7xbnNjaRoOaDqRZlalOkZ/RglQ8e0v1ufG81Z+Mr9kAvAHvdhhRH5yHli/kgByUOD32MQPMK3CGLflXt9QrDfY6xaHMM6cJy6EsaPQ2pD6zycS5G5cgU7EK39TjoYfBMQypHZ+JYW7s/u/MwQFU9D9LIeF6QfwDwrWxO28W5q7bb+ZiP3CKuyuuwIXRp9yOkVV4ej77VJzZffuoBfRLBeu4t3jeFsjn3g48LHMrTBikO5bGMm8831YlVUzQFoJLABF3Pw+laqeXeQj57USU3PeUlCXV1WgpSPDCXlOaHW4LumRgdDxnV1EX2IGszt+8/X1QNlGwytHmSXbFO1bpn5erGwNSiBfYEr8Cukt588CsDt4EneXTRNyk/baXXbWPuz/Hkd+/qyRpt5iEicjfiAhBgu6G67Swt3ilFPc6s1voxL7MPmhu9a1ui0ediOBllqF+pVIdTLYMRjpfrAhhUhK7i8U6dOUVVZVFTvarIefDvWLxfjSeyD/cjVDCAv7Z9cAAV4PRtLKxS7vMJoBud6ON4Vu4YHrIn6mOgu3kSvp37xBGUrxcI+RSArntMefIghanAHX3XVKJ8aFXMwzBgkTvbz2pC1n438b6XWIkiWDtNgB8OLo2HMhWFVSBdEPZ8/D4AvKKnCZa3vZWqo8eqH13U05luT9ZBXeVTiShKzIAV+ivdNhaMKiG+RwPwyM4iEbR6Wffyv0O5EHbBcVcIEcQ71bc0FgDevBVAeYNx9+S7oxR0Ey/2hTfER99gVtp2UkaT84jvC9n2tzOLe60aS8LL8MX2TSegYmah3LtFUM0T2Jy0bfTteTJqWi2CEwyKBRq/wSsN81y2y5+sEPWwRlpILPVmnQLqZ9e9Re7Os8rf1nX7vYKFlOtdnwZlVGrDSIAipyDAF2x3GJRRPrlP5C9eqlLlKfONFyPZ58j6BA4+kvI46JicLz86beV0Kk8apFK4U9xB0RTAhtXQHGK7lVv46Qsc7iWXFRsnxxjxao6TBMfrIya/7yUZz4LKJLMxWhHZjS5Kld/YzfoBYcvQ3+AgruuGZve4Tl70g04P211FCOs6d7iZoxcCqkXTlQaderDTJkPoJX9EoLR34kiCK9qla1ViTQ9p8POqpAVUxdn49OIX+cIJaDwuEWDU+CFu7P05Kk2j4CI8LwWjWGlmTZUF6ffkCd6Bf09Dgb6vrTPc3i6BHAbC5iDGuVp3GmbQuDOg1c/FFwENZ1JyAK3Ohg102mGH3/6jA4A/Tse81HazG+3Ka4m6duGhvvZIir6f+nmKqvz7bhLWOHcwOgJUqastLQcgkm8eBy2WOM8r3y5PG6TUjYmkl2YS0pc9b1HAAKBrNZbo2oqH36zLjI9JP2/2lEoIxfLftTS3042UZixDs9rNm1hON2ftBFR0Q/wUESUZ0EeEYKd5iDyWdo4MmKY
SovDuQiAJNDQ0x4CVK/UeaKunyLzRrNibo3aEjk+8zsaMb/T6jAA2q3NXPB6a5TBlKHWgXR0oaR6qOQ69+O8RlQgvnNBwTm6PclK55gJpDqQN2e5dN4A4OkDChGaFuqI9cd2137MJNsgEVEVuDwsimKUbwKY0pfMKbMlU1j3ZhKFcGPyxvtlIWL0aY7vrCtcvm/5cF9IB6w/zf7GO0pqX4NSBL5vXfp7r5saNZOtBO8eLJDILtWVmHtFjL08bpKzECjZ1BTWQUKAsjSLPZSmH4E7qHFaPwMYYa5EDeQkORPHKjPsJvc9dJm5GRkz3jetOqQcWr8A3uT3gkTKKF7CZUQoXyy/s3hseJ0nN9ebIlVN8HI1GxADBmmXNtJU/xwbsBujcfn4e5yGdLkurCL0pOPcu7EzUkKU1punFulL6q1uOPCqe8Akx2Xb3HSeQO07ERQ55BEA6abWq5Dhbs4f30T1c5OK4sKUae3bIzjfAAJiqDQGmk2XvOQOomQaXFxaljlG75Ih1Q7zv9kr4adexsT05+5eq4I4GD5VajS2dFfraHeZb9Zemqhy4jEb9/t8jx538hnPR2zYqbGpJv4DAiS97Fliq5XGvRqTynd/5nfjSL/1SvPGNb4SI4Fu/9VvL76qKP/kn/yTe8IY34DWveQ3e8pa34D/8h/9Qrvlf/+t/4cu//MvxSZ/0SfjkT/5k/IE/8AfwYz/2Y89MfI1gNAao58rzf5SwlCf3ZAyIpfeGHQsBFDPdQ6YEKt1ffQ0fYBtJkvyQI8VRqNIbYfqk5nj5+7j1h0MdyHqcRNZGxxM+gbVcWt1tZC8x6ByFj34O9P6tiGi4kS1qdS8z6yfmFOUQSLzja+X4B8ad/93h7oWBuxfucHd3h/HCHcbdXfw2Bn+nP5KB+G6/RR8tjcFAVWwGjWtkxeALHHKci/NEXmiP5Ius+u/2j1nDFe6OTp9DMyIP7QFS728Atc3FOQ18T+lbtt/OpPfPfNNQ5tTjpu/xiQaWR506lWqFw5hK9l2OY7H/VhoJEU7Z4HsqTY8x3ywXXN+Fk8Q9LF1kuWAbiFJ3a9o5tbO1ZzWeAYjSPj5cnhmkPvCBD+CX/bJfhm/4hm84/v7n//yfx1/6S38Jf+2v/TV893d/N37mz/yZ+KIv+iL8+I//eFzz5V/+5fg3/+bf4B/9o3+Eb/u2b8N3fud34qu+6quelRSo5o7NKbg+N5TXsDA6o0OGzNCkMTLDZYappnMk67B2plIENRm8EI7kox1a9kpEIOIGcRSAGk7zuP5Lg1YBbjhosfKh9m/1Udwvb0xHuzYlXGz+bhlz2Xm4ARHRYkDWAatHZvw3CnBXgFrA5EC1gOiOAOouAGv93dmfA1QBM5ONu5GAVgCKrY+mQxQro9xN0m40+uAf+hnAv/Mub5XN1PX0Sh07NvLk4KF3Z0OZAjLVWIdvEb9dNn5xrl/BqfMOVOWOAM/O2y7fF4WwrV+6gxL18wDGYg5Y9p9BX9pvHcPZWmQjqQKpHwmA+xhdluJAsD2p4Afu41VhO1AQTKIrVyMe9vORKPXM6b4v/uIvxhd/8Rcff1NVfP3Xfz2+9mu/Fr/tt/02AMDf+Bt/A6973evwrd/6rfjdv/t349/9u3+Hd7/73fie7/ke/Mpf+SsBAH/5L/9lfMmXfAn+wl/4C3jjG9/4aFrmvWJOQFQtYtDYm26qQiZWOm+qPdCb7wgqJQzmoZF+0lwATgF53wFf0p6XPkp8wktKUBn+vqqRdShMUNUeylwNx289+8EZvUhvaApg1/VnL6ZEI4GoKBIAEe+AK5vEnU7DeoWHPbM2J+7Vx3Lxt8+iudGJtobYggiJtGJ6iJLeMXmk8CgmSVqHNJ7iDoetRtgVi0CdmCk6sBYGWBunp5NPPBc/79cf2irtU7TBwOje0amEUHhj3Spf3MfkNCCIk16n5C/JYrd+fLIbYnoMoVUNvgWNDdSW9DG9IduUSY1rS6b21RYbc14lyvrYq785Rxy6Y+84A3KR0bD3j8HsXsi21mE+1tlOuaiaSLBkdF77b5vNgenBBGx1GtTUWGyrOTDPHwlSzxxJ3So/9EM/hPe97314y1veEude+9rX4k1vehO+67u+CwDwXd/1XfjkT/7kACgAeMtb3oIxBr77u7/7WO8rr7yCl19+ufwB6WmxR8gpt+6BrXso2jHxCL05eOtL8Dmaui4RtCH5Xz7DS0mDeU7n1ONjdHFxzLR6n+LYSwGN/uMjC/ehg0b8jfaZERyDMnuHJWrY0px8bkB6Cs7PeR09/XiVEj2Nw9W48PwkKVtJLbcUdMoDg0ozToImExdyx842vLpsJ09zw95apqG3NHkRDna5W7m0K6krCUbNYYnf40vKA7ivTb6zqnJ8iraj7hsldF+V9DPPbQxOLD0CcHKgf5FgA0D00t19BWOco4qu9D75ylGaZLtew0ZY/l7HJsW62IbDX268krRW+4u0s5Rd6lMkjykf0oUT73vf+wAAr3vd68r5173udfHb+973PnzGZ3xGJeKFF/Apn/IpcU0v73rXu/B1X/d12/npKTbb9miK7Y0HUlxdP4i9e0VEMqJyf+CkkLIdlB9vrp0iYHIPamuCJKEI4Mg0VnW3/PkVX/wAD63IEB2ck65zGxFqNFzbpfN9dBPR73NDtULZ6hbxVWyyVlf6ZpmquAOgY6VRxTvFrp2QMvbUJrW7pQvReNr8Wb+urIhUejjb1rX3Rw1cUXM3kfpyx3AWyjho+XimYvyIVLe9rmH9MWkXbfiwvxrn5GZZFacZ5J98XHyM7CEbAqGIqs2oxXDf5SsoSiaB8PVWejMDwIw4vf+qwJy2SprOOU3PzAG7J3T/QE/5Lnmsrd9b3bFQRqFjmB1be0fGwqeSZTmD6a2xZ7MYq1rNvl4FPrnKUFYUNWwnFrX9LdXsbdsvdN5/BEDqw1Xe+c534h3veEd8f/nll/HSSy+lJxReahqWdFqNJTagazdxS0HRyxAltIRdCLQBN2gSGv1wPczw4rw/mldmDpYZTFZasZWEIKNL93pKpwDTUjwf9owajQVlN3XXaNIK7zeqtxd93+h3uu06vmdDOclaJGtjxWVKxrDdnCn0HCdGWl2ruVyQITYWQ2p7yfOk7RwV54sZ/XeFjzm/ziR5l4bT7hvrAVVhCyU2RorDVm0PpObA7KA2kW/RRZmTJdr47eTRF26czhL4l7bdYjMmO1tDllhn6msfuMWUM3Yk8jjTbKuhtUP4LE7WEofK2zou1iZ954VM3Hs/EBsf1dyaiU2BJkkJQNiLtPMp5xJgsa0zYBpZGZQ61cZEAOgYENuv1I3/sCmOzclyffF+xNAn81je4ddRs6taBQbJ1akPBlA8ZvN+AZS6M0s9ub+/P1fWyocUpF7/+tcDAN7//vfjDW94Q5x///vfj1/+y395XPPf/tt/K/d98IMfxP/6X/8r7u/lxRdfxIsvvnij5bAG6NJYnuUYa55AdaZRHaSubMjoPz92YyViY8zegdCzKj6orbIAKI4EKBIZd4uYslMwsJRsNdWcL3qfUF9zHCkoUu
KDwWcDTkFHHrO0EijV9ALbJQars1ZGnQXTBIC/Nn55jLKlXpjWzsMEzs2Lf0yxiA4iRVnLhgmm1NrAMyNmBzOh3YnoIV3U+4q1xJWnWlNS/rmlFD2NwjJvVdcpsQpQ21BVz6iQqfylQF93O/iQU3YtvRtj55En6Spgu343uT41i3rc+28nCcjz/BozitTUxi9e0ZLMK0Ctebiqu6ATBFhtvEtnfAzRSx0n+5ZG3wxSt0HF4aaxcDzUKz4eyqJfNvAKCln+TWXVNrFWsWivODOLlvtHRlIf0jmpz/qsz8LrX/96fPu3f3uce/nll/Hd3/3dePOb3wwAePOb34wf+ZEfwfd+7/fGNf/4H/9jzDnxpje96ZnaY0Mann0Mpnupmq/NmIp5b5vS0lt0r3Kj21ky0vu8R65qKztUOCh5doOWZo+7NYdy94Itj35hxDJpX2nW50FEKJXULIwrAj+1P8sSeVRFJSZm9oVTZPTH9I/K68KzAEdaZMAaenRDyYiN9VqVcTcWP3wF3gu5hPzuhRcw7u62uaab2EQdP80D3CyFJzcaKYbDTl3S0z4fum4qvf159cV3OplxDmVl6+0+sVMi5Ttd8rjSQ4TgrVdqQBSRr30OOtceA/H5RbmTtZKyvOLhFi3177iKrPpyEUGlfij6LQ8WHvODs3ekWVgIAjXTCbkxkBJ2xXgkPlWQdulaR6nJicqzG93j4TzWCXKW3NbeT9x/8N7+PtiOP3jdIJVnjqR+7Md+DD/4gz8Y33/oh34I3/d934dP+ZRPwc/5OT8Hf+gP/SH82T/7Z/Hzf/7Px2d91mfhT/yJP4E3vvGN+O2//bcDAH7RL/pF+C2/5bfgK7/yK/HX/tpfw0/+5E/i7W9/O3737/7dz7Syz0swzQW4c/Dg+QLkw7gjQ7/v0YHXhZC6lTW0Y5vz8uS2+OBTfYFZ5Pmkcko954YuuwCP3lc6UXbFg/+OyBawl126Yv8VI4UUfD6XxpYN2T55G/wBkMtRzjzvzI3z4glV2W9Pwmub9D1WuD3WwoQQLPno9kQ9uorL+OWRFq9lnorGmUH+4biupBFPGFOitnQAqhNw3e+jfUSO+eacJCk7rSWsKIrhbCTZjcorNaW99ruwmjkfXaK63Ge6v0Yq7XckC49iZb9HOgx8T/aTh3WTYyU57MSI/97p6+Q2Yg8/BR2+yw7JHjvFTHetCJu9qz/uX1dzqadydXn8WIUn7EHjJz5cCyf+5b/8l/gNv+E3xHefK/qKr/gKfNM3fRP+yB/5I/jABz6Ar/qqr8KP/MiP4Nf+2l+Ld7/73fj4j//4uOebv/mb8fa3vx1f+IVfiDEG3vrWt+Iv/aW/9KykhNfAz+LwipcoTYkVy4Eb/hp1fwP9A1LEg+Rv/VUocLdunQY2642+jlKrzQj0aGFBiQTMw+yeYpjfbhNKYSOG8Ar5s1WYX91O0FuNrzrf56/O7Xv65N5vioYcYP2WzJsLAa9UJTv4HceOEBnl0YDTHa7QtlNDWNaHjIg0oAKWFwtsEe8YcmLU7VLsbx3kCkp9gOne5pA1X4McJWQENXrXdwdjo+/wG/lwqDWybgqAEdFUd83XvoXm5ak5fmH8zwRs2YFGbAAUAWD4FjgMu1798IiSHg5uM2xvr6Rqm4N5pZueSpOh8BUOa7ECMkNUAOOarMAXub42xOvSFnFLmUY9jd28v5jg6nTpo93Oj57y8ssv47WvfS3+P1/7p/Hxr3lNPCOTD7nC7Btxkl0fB7Th80AOdAkY7PFlMcNX5gMy7ZJpxFwGv+wfAZUZryGZ9otzRnjY8ajD05W2bSilBOKYNrmd93mupzLD0xdg3DEv6JpmYBxrSpTlEQJ74MX7JWchToV1LBFlmQt8pGHIuQVTZo6iiDc+akJ0ZduDxlhutt2X164xydphTktxlE7Ax+qmDD4eFVA/2nWAp/uWnK3UNY23TpvYTqdGoIH1i0brZ1t6H+PoW1LRDiq8fLiAf7ccIScp67xaVSzll5s2D7CuuVFLmba+3s/Qg1zRqGGIUxZ2epznKR/OUL8mHS9/3m7cIeSkyv1BMNqwZsqQ/Ihp8hn0sRORUxW8iUBOHaTjynoZS+Yt9Rt2x2QDk+VVs8sbbqVtym/dwWsdfwDF0n7RJ+mopU3w4z/+4/jj/+8/jR/90R/FJ33SJ13y9kms7rsqKxKpytZTZXkxLp1DQTrT6RHSwG53WJXuTQ+B2NKWtfVcKpLQPcWDPaT4/JrqUDdPpAjeq/AvSkoAnpGyY6I2lFhDqFAioUMalcmxPKo2fkVaYsg2JM7PoIWxD2Rg4rvR5oYNFbjXOSctQTWippHT3b0rR9Y5f4RXsSUgbdF8D2NCyIz2TSZbtAQeiz63Yu0LG08BZK1UE3eLOw3Mi7itOxqotJ2G+CB+7KNwg6ur9B0AtOtp8ix0Eu48SNwTShnjvtO4lWLg06ngC8KfIDaI3XcyJydZKefcMPdIt/TVf+JFNcRY6+PNrjm/xB6EtrHUtSVm6unZmDXad4jiBSGRIvaKnsGj3PFZcXvQsjxpkIo91twTkdGzBwihdMnvlg98QwqHDwtUb46FK7cvH9cpASrq6zVZeiXv41VpWzhv0RMP5nES+EgTNUlfwg4tJ5bSU4Uh1etSB04t1wghe0ZrLIzcHzLa3l93Lu5yGflqoRuQvRQvn7aignuWHFUxDQKIPYslY0BjGa9RHh5r48epSBqvAKUhj6Lf7wfnnHwC2/pnHbi6GQE1QqAJ2GpVtYhq3/E+RlGpqhuFcYAB4dHukavfXH2VO+9XRkCnOUZV2JwvPRt0q40bpWC4nOdzPWIKcJJ+80V7DYPYd+yY85jicu3PdA5P54HeN9ZoEgBq1+pYb8mTKZiSmxAH8DyGltIn0vtHYlKhC24rnkFmWnnSIBURARv64lnWwl66yH5heBPdw632udwn/t3viQfuANFh9TZ3L5RhB6eybFxTaPucxGnAnZY190NekBtU7zKBVfDQalikZ1or6+ZXmrOFI0QrBq1dE3wiIz5ge6bAjCvvjs48iaOMKiPlo5nagFp9O0ilouTCkz7M2zjj8J1PsRx0Q3sCK2mdOoVvh9Tf9rMTIYtjdaLcPg14awSBygPvi8mTsDVp8s8ifDQ2twwY+Ylc0QrS0zHg+UqzvAbCN5T6RhH6L+/WY13bohfukpPFaN3A29zAorfelc6vfSMAsfo17i3bc8FZotHOiX53VgT0OIxdHXPFhx3hoxtaj/Mr6YyjuF107luCGktm16/HDumTBilOq1SwwiZhPti5qklimWZhVjEOWoQsBkj59fTm1YYXnq+Vt4ajfXIdk/7DSG0pqzm3+o65FqJNFYg3sqqWS+oCk4tCHY+6xLwzEXtODNvkbAe3KpXJd4FA7HmiYfMVaVQrn5IvyMcGtM4BbunP7glHqq2uFHMFZjDNIHBPHZ/mmeJrw4lXW3wxTzvLgYzRsoz8uLOuTwXUI0UUY8nescB3x7DnL22eyg2ZX+csDR/kNLTc19ZnFvfg9f1cjtyYwBwQmWvU+RECkxOF0J50trAIi
s00bt5Gc8ZwvjxV2HSB1nLcMqCR4KA+bgb+IJK3K2Q+k/24v4+pgSJWxcFNe6JA7kxhzx3qVHs4Wjd9PZJz+NkXCz3YFVyIvvP0sTyh8qRBqnt3IZgMWnlJlOVcGWQV57al0zRNQ+TFo25LGdHg8WefQypearR/6lMFtU7PuZBpiUhK7MWtZDk3r9J54EDO/d7bU1XbdopoRAJV8f7jXvKqHNx0YjjKYa1HgkxbvDGM11peu14Wihhoz/uJtVhAS8pxLQDAis78SdYAl9XnXIWozcMvbE1ZERpXc0ZgLwr0lzWyt7nxr2GbtKNieF3unK+NJqa1RJ8emfI5G5+gRxEORuy4YqCYcz8IvizHoDlZ3AHy1a5SZMxrQ8gFrv4IhwGPqCyD7NXIArF7EUMQDbBS3McY3PTQQ+adl9U5C6Dy1a2S90QkEKLlkXx8LbrCCwMS0FnncVnUKVMAsSjHZPVew8irESuq9DbrzgCTT986aYy1QwlgaW7btFmBugoxrWXHfSa92gmtYiFIf5DAM4BYdt4/VJ40SAFmuEQKE09zPWd0t7MkeWUFUzCfBEJz+5uIWlr9ESG5IIXrJXXkuSc00pwqqAetjaBR0wgzILpU06PiDuQPCUgXUhe800Swdu+vuGI2Ps04KoZtoWK7jU9AbA+yZcvafAqBNuftPaLqqBBAJWx8qiz4iqs0OtWgRBXDD/gk9q2ODnyDN6uFujPTuZA8Frw3O+/bEmXqjtOkCVRxc/RTA8ABm0/sqQQ3vsSPY+BOX3pUcgKqsGDAcvIlK+fdPUKbhfTutLMLAXGh7egNdDtQZSX1otKv7X93xDawajx6XLTQYYBaCwSU+EnDOZrJy3jPhu8R2RwXt0+FJwksJzIFiLcnb1LLYMxkFqBb8he77pBDKu6odJpulKcNUvc+Qbw6vhjrxnjQsz+dGx2E7GxMwk8SFPrdlGSCQl8ZuNztQGrUEs7RUZo3WMjv7CwVi5Wgo7G5qf1sRmC95dXaIUyOe0+FMa7zKKtJb5t2uKj9IS8qMFSBIbkBpcjaXFaGefQz98CTRjCBifpOqtOXJ7c+CbYdzkOLvS7elVkdsIjnplsy0+nxz3iYl6NoWSmqpEGiqX5uM6QHZp8XypxgMO9P54UMCd0Rp10+RHJeMFRFbPPmKxoqOT2CurQ9BlSR+psaD7/HPEx3tQdibldUITpyKlPX+9y6MX2WwhFg60Yl3TdYVcTreCY9CvnqWic6sqX1MWfYtOKrGdFlPlts6sJrcllVXVGnrg2cRW0HmtCFG0jlRFV1LoRo/FUoX7RZNmek6Xq15UmD1LKJGsCUhS1VLpws94ZXSR6rzjoJv9kH81xsI8WIkNzAARlZsYGNaCsuit9CCG9MaMINX8bR0Z9IkwjCe/KVaiLdw8kqWHB6uO40egpiAyr6zMgGe0qI6wQ9BEs0MDj43FDQtlWVq/g8ciq4uLUrodDOj0WH38tj30DKiRAAw42nEU6T0hldKdTmTNLqNcYy+K+TVSo50pN9ZV70mZnjwNui7z16I6y3voXToId7vC3SD24O/JkdOJxTaNFPkgGKjv3dXUFT96CKsyER3Qq9u+iyRBeZG8cYLM9pHlQ5Bclrr/9gaYQPz4JKWAMmYMNeVmVBpHfFMhIs43zL8oHSLlWROdFkURDbKq6WwGkHqSVEnOmRK4Y8ErqeNEihKVCAC5a4SbjD/caqwZ4y0vYg7lZEAL23VzGM5Z0I1sPEExD2GljyOLpgMKIoY5cVltyDwCzKkUtTBbAXoImMqPfkBUeTvizY90UDpY+UUipEHzt2HIVkJEW0UT1EctDlNOR8CoCRhvE8Udv7w6ufnEYGJvo8MKI+HFqjBgFiBXQuFnGHgOizCIqVkxdYBM82upDA5HXZnEENWQ9suMEZNpj95gJUWJ71MOMWrxqBGZ+Ya7zwUi5sjF7/VETZV2autF6CJs9hwHi/bhqQsVwnf63LWG/Yq85FNEVjqSwpRH8Aw5nidL78OIL3BK1eHmF7U1bbZ2/cx6K1sxYfDQx7IWdG0QwquQzJt23TboMuiIv5PiflcFkA1aTf/fnIWLW7Lz561vK0QYpLZ0TBpyqcZUcIWimWG7GSxx7V2aANsXjfwqkxMadNAk+B3N2ZB5Q54qBjWap0+o5Rh3kg0SU76iPtUYf6RL2ad2m7vA8A9+kbKnnF4rcPB0DbHT40J1fSB0Y2h8oN93o2LJAnWC2jLbOXpCW6FYcm6L5Z8saV65JLb43V9DLDwYBQmUfRUy7EyN0LHJEB3zBf3Ut1sALsIUmCSAm3yH5DDh3RshYM2AKAg0VffB20byA7Nr0run+Gl3tlifj8GrcAYieIqwtVUAIYie3Esg9LUDhSX/Ue6DD5W2O3ZFB98YbT4vFPsanGN7F3jY1MBXLdkVUIvjEDqTaS+Y1D1PdpQKC0IWuk++iu8DtvWGa3CwlQph+xPZXm/xSteF8iRYtZpjiy7k2goHoPj0BFpUbjl/R1ffX5YmUKWyS1DpTASc3uSb3q0eWnB0g9aNXSywpjrd1Q1Ul5/4uqzcjq1OXNeZphAktYbC8mJX9tWasDiQdwAkyYlL4/qnMZ9Zgg8aKB7bkI+xqebGpVTGo62RvlDlggvqRZBlunHlGsNMOhL0oIqFnVdbfZ+Mt2rU+CX0ZQ4QE3Z8TOef+SgcgHZJMBzTpn/4Oc6L91j6Io9T2olMbKbxGJHQIq7Qmc8U3zM3hpv/LqvGMpxtuACm5KVl+r2Gjy2oAnAUDCuVA7yGyRbDSQWtimrpnaCqcrVNbHQkOnxPjv9VdvKmmsnb0BGpI/s5NSoiWrugNG71mQkmJKTTfHhdqOxRsgPhaHI7uiumxQbGHlfOQmyNkNORMUx0qiyqbjaPRlTSlzfvYAUMzLsoF3s2d6Y0y4PGmQ8hcDDvMQxqCX3h30OzxmA6WV7qF98eIcGa7VElZKYkUr8YZfLI/BM1UiuhasDXv624Xx1cS7LkymtFzUfw8QtehpU852UxH05bn55DVi94VdYGE8Buw++B5ri4Y7S71M8sJzu6rmkWUHSoTxSKaAjbc2Y9BTfCcesGMCkgUHq77KUgTxbpxM24F2eXADIkRLA1KjTY0nqrZbChTwXQHiBo+IE8iWyy5J96FEl8pLENOwMBAWbI0LzmB+5CHhgtseAZ1jflEd4Vl7a2G4sCKDobELSDKbxsxvho81Ner8s+vFHQySyTD+ZJ5rh4PSdFiChsCMa+BHFecigkVO6XdBbEZczbeDDMnjzPvUwV21RpGt3ZKSdiTzB+dnQead1lIPyc3WaVzbHbgeSR2qZyhPGqRQNq88zz+EB0gGqmzA2D5xUAiS8O5G2PNyLj1rWxKZlo4Y/oAqcvloKeeR7YsujsV+4+ip081polS2/z97/xqzb1bVh+OfdT3KeJoBBzuOU/HY1EMF6hGJaQORCFNra8A0GmyxEjVmqBUSa2haT80/Y7W1ib9afNOqTeqhvBArVRviAWoc8RTSai0Rg0Urg1YLIzQMM8+1/i/2OnzW2vu6n/sZUXjgu2ee733f
12Hvtddea33WXvtEZbpx3W0WIHwqMMeR3cjad+ODiNjMKrGZQ1WKfZyjbBXEIEl1nWrIrmjQoPF84Vrja8jAimWobcxeOgPUydlsXo62oAe1FY/jxREHIhA/YVPMKRoeFsLT9fo5u7xuPgU+2Wc9laS3jquB1kbNYb9ThuKw5lr5Yu4CGX6kISr5DJlgwKodxGa8vF5c32izSpIcVSZ62IaolK8U6hqKBggNmrkH0yfWdH9hkJE9luInUa+6Pu+inqHxUueDdI6xZxMS1yDHEY0z8uFw8Ji95zQrPdNoRYZdhwxouXdOutEgtfku5g5SBFhAChaA2CG4ApNPX+5A1YTEvEOREe6zIQWo7masBb5T/n4JOwbETpcFbSe0is0P6tbNRZIcOqxavi+dQpDhKsaXhDbq5NPUc8ElK3+SwMbfLhq/LraL0TugvP2dzccMyMv19uB6lqniy0Sec1wh95QeYwvRe0ZptLUyhB2SeL5S4D2rMKTlnqM3kcS8g9jkQOt5b2MB8yYCiq1G71OB2CVAdEt+mbVfTWbgk3nzs3kpxLCJ03r4Yy4LgMO0T95JZymbigICC8M6t3WMiVkh6WSyA6HxejhAnuNKVxigyJEq4W7vPYG/k4hMALXgp9U1xx0JeIrjl69t9I7nMWWtGveOk06/ilrIbH881JyyTM5BYDuhaThTGksHpK/B5JeTdJTwsLXZGb4ggJsOUhcVmDZf2FY8NQQg7WVyxF49tF1DIZazmVzRFNjNiwiPyfIQm6/g6z42mMBe8JY/PZ3ZUp4ixlJdpdAvp5/s8ChFUQwr1WnfPUypYycIux8C6TwtRkFwYb2aYBcJOPdqw2CHwqtNkiBj47gSXmXmNbOpo5IGTUtNVu5VZMi3MEM0nyPS2OC2LkItAmkQAZSF/LwBrE+oCN22+l7IRVbF+K27bQMkChkLm4Zj5Pm4Q7XH19aTSjlYsq7VVaICJ63huy9ZT1K6zjpdJjDLDXddnnhzXamfHJEIIC0Nm7oUs9FcoLUWl/xdiQD3hCtYufMcLlUT3ZB1f9Edof0yntlkuAN7vkYnaVc6UrFRJjsdWRnujTv9wx7CFtujHOPDz8gGbJDYzWICVy6/OCmNuCvSjQap6D25B04zygCEMSo9JTtuO7zR8NZqY01pSLg1qfU4aAacQCGyj4WfNh89B4EbQC3bZyn9c51HxYnOtP7sPSvXryibltfI1o0QVlhlFyr2BFn5JUBrxbJQTA9/IEE+x4TSki/DdF4ekiZlrYxuDRuIylwex6mhTkL18gLbsfMdiFNPem/ax/0GwOxjx3xR8zJpxwgBrX1DghKU+F1BNntPKQNRzUbo0iN3PQhL3R46AdDAQZ7La269EphYnipNPQRbnSxu6wS6U71xhMBzDXWqdupWn7Sw4oEs6lFsU0QTMLOVHJXCRLGRbsnrfjc+o2DK0HX5qmnm5ZWsYOlcqkQPKMtkwBFbL3pmOd0ZOlO/bjZI+cm8Wx73sJxuvPvhcG0t1LiNs1vTc3TFuRzvighEczLFdrG5E4JdN2zemKf7648xkTASEHPYkntW/BpI4HYL/W2mrQ7AwGwMUhldeRi07DmfDMBhDqNhjx5s7dHIoVftqIlinPJIaz1oe6quTYbpBjxKPyECDdeXjxcbTuQCafzdaRljeNswAP63aSo+8VJk8KvM1vJd3rFo1lMAdVhBqsApEZ1iUe4Y5Pf4nCwqPzC+c3tn2x0g6Jk6ugbLauSHESfh995a8MCepLrmbMdm/AmgpI2Pj8Md20GqXKdgRwObcG4FO2z8YN8tnE5OXQSOwqsyL9DWQ3mPatGjnuWmfh+9ObV2QtkIpJO7XQzZ5h04TglS7VVdnW42SAnqcexs4Jo3vPKopwHl+Kfm0X8uw4GALfw0kBCNdT/xjLVOITEUP81cHQ8bU8PjWaBYV56VFvXi39xb7EQzUHFddt4Tzp1L+0YuFYf+eoglZtgJK+HwyrahR1b3PcuNrFuYEAD3lpSNjik/h1iC3MZP72XMvcvuq/CAeWNbmqqTKVqT0EskJ7J7m+VnKi/Xe3CdJlMQYhZgtKcd6M7uBboMON1e73NAoXj+45/SKSrXUi5i1qfnMQHVpBLXS934U/KZZl5x78FyLzYjUXWbqUoO6wKV6fWznf3Hbz/njuNm3I5k9cNrBIA9FuHGwkUvcirbJKs437XeC2w9TMl+NTyV2HItZx6ifAsSWfgPCuod41PpRoMUH3rIA5TdKPuYVPUy1xwqCN8dKvosRhBjDfxQumF0YzKClZleMn26UGYXoVnPcd/X2RTLyQa3rICvXpOZwVK/YpANodSEardtVjate6zl/oRFIyso8bVmmLzCgx8STJSyTQeqULPrFmBF64dsY7DZCa9t758dxCM+Vtl9CFCngEupScMbp2YlGCCg3KFqWym5p8COljVWgFov1u17MaxZaOx0XZkzJUUDpwL042rQ0LMhcajf06kocmBgJH3aNXfJoieRhaQxPJ2muhwmgR9lo1A6BJPkabOxKnUHI6tZsgkwtvr5se8OVC3st6SGMh2HpY7uiy9vwb43GzK9lLpfuFF/rs3e2htgtcznatHuRLmDFOAjlKPW7/7lXEfqRoOUMEBRWKl6xf4walu0djkVbXCHf4Uh+UNtrcdF6cWw5z43ugt/zqxbprCcYalrOA9U1qLaQavbL6Du0F9qJJYXxjoKizm78PVFg4cFHiYzDjat2tfUnGOARBBHOYx11H5oUjNIDMDGlxgg3mubjMcYwPLVayVyCtwROGTFOTyy3GLa7/KdBDSRMdt1hy0LUIUvaLqO1zqKZeS/+lEv/7x09oPRu9EQWlT9XWXVGN+CEgcFSTo/tEEzLGSnYmsnvXyTtzAZW9qg4TSPv+HtyRztWVfW1EABuYBitwjK2LZKxdZCArOee519sszkOYAc0cY/GeNOvv3X5DAKDnkdEUwlG0FGmJdJFKC6ZrrZIFUAqnqu3YtK2VC3k2unZClEum7kyWvJ8EFeqd/4fZ9xNJZz8B50OR02AZcLS4AKpQmDO9MX4Q3ycE751WoPebG91+E0O33Fy4y6UEEH0pnhwt4Y7bl0sKOdFYiD/aYadKvMgK78THM6CGhOplanSSwmG8/hyLANxwYL6Z3nNGnzVgHiudqzaYjGowrZ0/kp3YtrpRVzs6JzhGD0kqXf68Bh/5Taq/bHIoVzeIJfPnXdy6Zsz0usL/4PlVc6LfAQIRA9wwCgrfzOdUk5LrWsRpovy9d31NgGYNnG1gyOc/3LFay5CQLlrGfICjHB3MHo5Ha6Q6wESKsXHHRjkJRwyPEajtONBinnnDAHzUrwz/BsBbn3ngk+69hRb0o1DaJeAtyWp9OZLeHHFSC9j1JEgEPu5DhNjCglpdEoa6DcqHmWB2AQxTqfPOykNoiPBCYxWlygPebvazL8e5l8AbrvdDO48mcnkNrCvd0RVu2K2idJ7LbvGl0ruzPEa+elM2WgiKYbLZpEE/f976jufsPyUJLxIdMANvO6TYZUfRsvjO/X7h6eSG6cGsg7oLAsSed
Ve5fPLVxjkdCLp2jqY4n1eQf3EorEwsCz8bC0bdLkiRwEQQ47bBIn6U5OtAPVon2j6SXDj1kZge577HYek9G7o9Vo8znkc/gdYyMEDADcobhQr5+EQ1OB6dhBYD7EU2ouVQw5H3h1Z6QbDVLVg0FwSMfNaDShZ8cFUwlJ4+qvcD6Ae4S8s8Np76wbYxwIpRcYW5pYxpqaM67tebZVzNIBqrJYz4FVObyxoJ9niLE3VolbeXlqAOVjbAFGw1Ei73UoYoKSpqT7MRfGowBNQfQoZ6RCuyok7LPXnHxJAC89TlQjM/mefOHQkHD7nOepz4ZXHG/SQbL6HQqLkJ/rQOXXRG3GMiODfW6A75h1diKnLYCnVahiAKFMIZmmy7P3ri5T2RZpC3NJQ7O9yBg1PSBtOUaExPxdct26w8T2gpycGczdVsz85QkXU69qwY94h/KoSW03E5ePbIykQILlzt8IHhhvRw32aAPf8DnbNnVS3MkRa5PGnwi7sm2a+LC4EDKh02vXSe8TIOXfgYbYpMz2c3xu3gtoBhrAHBZhrzWNS/FkF7RMngUDIDes063pPcVpqeQWlplgHajin6h2qZ/P1KshFYK0I0GjcgYokdGhsaRxLaUyvEE3CltWuk5nd/6uwJE9Bf9gEJsgBq6h0xT8dgzHuBasPeRboYbbWgfM+1jRfJ5Z1oFt9zRN3o3QyjOQ1hreds358mtxz3vLG00LFndMejkHxoZxWOrnlLwn6nRI6so4+oMoFbFxW+SaPMsjQsYNoUrPaCLAHTSSZbBzgvyN5L/TqDWnVq624lz3R10qLz0kJtSeMrVtOIzeRv1ogfhCO3eIexkCgLZZcwE2tuSBrX7NdXArPdmcCu9jcGMBfzquE7LOPFql4A1AxzyEkzTMzCiX8PSsdKNB6twkgjSU4cUBKyXFGe6xiHf/6VrseiExE24ySlxktxdNkMoWJnavjkGxMj8GT+VQFvuNpFtV4/wfPz13dndBToHV37pbctFO3G31W4PPIqkrZE5Q2V1JaR1c2VDYDdeu6xLC+BBLhW4d0nJMotfIv3CnmS6f7OUsOWLGr06PlhwLFMTxKcMmDYPkOwNkhoKMJlCBVIx/qmbIa99RwqspAmS8FTkJwbO1a6qwoyaQE3P88D4ioPSEp+4byRshmgIU1rV7bKADQw64zsUcieKSYc2p4oYTuqTZRmcnAyjAdhuJ8AUAtgl+WHV0q0b7quQG0Z306J/nXm8H9QSIlY08dtAzG0F+Og8eS7rhIKWLby2FE5fhkXxj5cnYXXKtSjjKOD5hjHfzN/Kq1lkXGS5PuVFgYQgX1inIm4e9KtRrUapLTaPbi5mpJc+oWe+Sd2d+6xrF+TX1cvzISSytTVoBaTwclBOsuAdVgAloxq7Syx58zrScWZBeYpnQT77hiuyUly5P5ZlzUnRpjH730gvd9qnteaFwb2FxNogwY5osaPsmQqOb2tcP5QsFlOlakQFvlxUa2z3ucWUB2h9NYPI8uc35QTrJlqsdj1wFUAtnw9Uypr+TbkJTTiL0Dgpdc55TedQ+mj1m5d4KCKu83n6vTWLyanuuajF7CURZOJDOE36GABjFhuT3EMNiOzwzHPO4pRsOUscp9UxCSTc3SHZdO9P4HWvpUHxyB7gBooFok9sxiOqe23AtUqS8u15jxEy3C6I/H4mUp4QjUkLp++KlI6mYvEsXQlaGyqoQast2trU6vLNtw647tljZfMqFXFmqmmXvQUVv6dJ7TbarCBu44M1xqQxGk9G8jtfbSS7Gx4VHwWcBpSE9rvug0TRegBjHoTbzDY8HlzWNwwH96xCr1p8g/tiUdp+cMXqvtJ6u2J7FGM5ZfEzejO3LEG2duc984tmaHv5ix2D8W8eX498FOBYGxM+cmVfH27w9BJf7PmyAbmPIZwPExoZiw2B2hnrc8QSPsufszKzGfpoEBNhRPKkHk3u9WouyoiGczLokQszxvyra0NviXIAC3qdAauFZefJYNFjZj7lUp5FzfBgZwjBj4flFKIG2aOLWVjOUCXrVghS/qHlWqZ4aShsg5YqioGvck0gFLdwJ22aK50dqEDU+nXjBoGZ8e0oPtYSApqfSCqbJWHm3hQOlhzSF9cpO4AlQxQ4syZHIc+5xLHRqad0OkpJT05ph+iMjfSoNzGBe6STXHAGQKmGJPOIyNtdl6USJwI9bGx0SDfDKZ/KTjTrfS8fIaeKC6lKBK0FKWTcS+KuK90lPJEtLkMrGkmRSgq3WR9O5HNd3jNOuRccsuiHjtNhyYdmPXLTputAf0Nqo59BsnetO/MuTVloZdLcv2fFnBhkS94azUnu0LlvsQ18nvQ+B1Egs0CFrwDDGIHUQU+LS7ddlb5YfqWDnDSRxL/+2bGj3NG0hOVNyGCpqFtLxRqHVw+T7Uf8UkHjAP/vEEEHdpNcRQySmjzYGJ4PO8Iyn8af1Qwiuej2ONsgkgGIwZuDmvRlLuGdBSgVQokFcwfheYB5futKLnJxXjJZn4+FTWmroc5HYGtr7R6HINLJsQKXm5RSx0fVnC6BILNjcxHi8ie3XNleSnbQyoajt3i3Tm8npAlBLh4vops/em4j3FPCNV914Fj0pBiMBKkNibh+q3lrLWUd3TDASxYgebONsOcg40sd32siXsw3T5lQaTht1Z2ST8wnHa2+33ZzzWmCdtPYsr3hWpYAWyWjPHNu+mt7nQCpTCnVAPpzBZiZUibkIY1mASnyaeLqHEvmwB9HLwFgBLhiDxe7N7iakWx7fcZYhR9VV7jFEdUMZKY6P/okwEgmodjqomwyvvK8NK9U/WOsRimGCTOd6cdjzT52O3M2VdbpKBxbWcZl9M4DnZF3KkPnS6ZQMPwRbTTmLe0LmLYwfEKNGZxOd9Syi6acMKGwHBGeYzeRTknNIns68IU7M5kMwyzqiqf7990KeO0D5MsJr1TMBzF/OOvMYn4Ec5nDweDXBRgR2CCiG7VBJR1CH8zpO7tbgQURXFk4NyKhPhp3oCL3UhfnXMaM1dyHzF7t3gjUNXJ6/X4ynf7cP3qqtRze4vDPS8WZSB+k1r3kNvuiLvgj33HMPRASveMUr4t4jjzyCb/zGb8STn/xkfOiHfijuuece/L2/9/fw+7//+yWPj/u4j2u9DsG3f/u3X5cUs7RYeqr0kD1aywNkuiaL51bXeOFe5Fd6I+6pZcMsB/X773NSNDam/CZBWADUXH/M/PDnyrUGNP6eGxmp+QldjzaYq2J2oRlgISWYDPxKe84Hv4Bi17Eob+EBBm01tHrYVITx/Ac0Y2/1mJ0TDo3MvUENb4EyRm2DJSFU72QAUM0rqBfuda5GUYhub+eN235byIEvXC6nZ7McOi2rpKPnM102OjHr0dJAQ0pbnJ/Y+LL3P+sxCh3efjtWB61OtoB7jehgNDspK1vBofOuy1wPbuNCK+gauEozMKZjIUWOa2/2IG/K81ybd+2e1Dve8Q489alPxVd+5Vfiuc99brn3//7f/8Ov/dqv4Z/+03+Kpz71qfi///f/4h/+w3+Iv/W3/hZ+5Vd+pT
z7bd/2bfiqr/qq+H377bdfl5TTiZ0vT9SLEgE0t5YePtO4iBiDGu4qGOgAtPEbKoa9MgC8sFAggNoMQ+ud+SQLtbyXXjMVwuMsQwFmr6QL91R9YDIgDCpwcFDkeVlA/sPAscSLBoJ8WrKsXzk/DcJCKQIJlCqHCnrt7fJlQUzpLU1e9gnKWp5sEIO0LXlfnguPNMvlEN5xob6jAMaWOQqIb8cRr7c1QavsloAI0xFuf66f2LFpdcF49sAMGGhpBr+bTsgMVKml9un0d9qLUTyuXm8bv6aa0+oP3zlKR81iRpnXge2Xu+l86tpuDI6Tq2WxzVXRZe3Cmbe5ftRu+a7E7jFjjSj1FnvdD+qlZg+WnmMKG3Y/Kmg3e7XDzvCjyM952BTp2iB177334t57713ee/zjH49XvepV5dq//tf/Gp/zOZ+DN73pTfiYj/mYuH777bfj7rvvvm7xJdWeC7LhGh+qxyp0nQAhNJI0YpLU6gl6uQLEBpTdG/UiPabvEyu8E7tjt65/LS49xNNtqkvtnchOjAlgIoDiEEz0KerYG7MusKzcRFRg7qkiDVLhKWlZGBmdb0dPTOvhfwSuvPsC/DvWkzaWwID0+uDfF7JUasxg4/yF0JEGxDPnL/c4ts0Zk7xpaDKat3myxLIRfkqR8t0lfExvM4PoIMIyrySnLkf+GeqgFPKavPUYSQM7f1FjXjsYehOMmpyGaBJCJ27HMk5JKHZK+lNWJX/b67HTv04sjlpFas9UICivZe+PZs4pYGsMbRKFicnYeT2BqhQZbaL5qTqO7mj4BSAWB099yVBkDYe541vQmAXnNZcvc0q6bXTnOYEI0VvcFwDl9drP7EldO9x33fS2t70NIoInPOEJ5fq3f/u344lPfCI+/dM/Hd/5nd+JRx999DCPhx9+GA899FD5A9CsDbUbMYO7nvEOGbdVGHB1vYcqIsm6vH3fRwNd7uVvdPfp8MU9ha+ED0Dydw3no+NB6SCFkQSt50KtlxnaAmDtMLcaMqR8GfiYEAf3gxQAwU4G8YPDEKMJyeAftOfchpUXh706kpcMdeRf4Y00PtnntuWi7jDSdB0itjs287JwI/7jlg9pL3VHq2+/n/kvw4tc1/JpT9Dvgp/qoMRtkeVuh/IldM2+I+lmR6gATAAcMQOzXnA2/vzUZtQuI1y5xbE/+beW+eAxUGn2uoLuOUC5fnOYz7+vlHvqGhJQoerEOi0kmxpwAjFZPLrQv8LxYq9wqLMzQLHMHZDf0p/pxIl3vvOd+MZv/EZ82Zd9Ge644464/nVf93X4jM/4DNx55534hV/4Bbz0pS/Fm9/8ZnzXd33XMp/7778f3/qt33pYTmcq1NZYUBIhh6Lf8EzCI7wikfyEwLnncJkx6GJgrAejamE+AKKKTTfbJ9U9a8u7gVezV0WhV5oaCl2UfmhRGcQuBkzKy0fZL5hIX+075V2ty9VJu9u6LNTbSqJXNdFgublXni5ny3LV5Ffpf1RNKl+BPNuOgVCyt7ptF/be1pyFgzJ7le2LGGJFTwPIrbXUNyQFfJsHdZ0ghwqoABXVN2fZ66KK2Ji5jCX13gUIHHoPalm/82RDJGkKes58L+nha+Oi2pExU1KXQ8AjGtnjGhUfvZJaDc67ZEe9WejYrmjsdiQIGV2hRdCAWP8XBCLDan7lVOr2z+UHsHZteDlhJwGqeIMA2C+12UEYOCUwQf1E7nS8z/W8/8xA6pFHHsHf+Tt/B6qKl73sZeXeS17ykvj+lKc8BY973OPwNV/zNbj//vtx2223TXm99KUvLe889NBDeNKTngSSoUTpdnw6gBQgV2ThX+kjF88HWDBRI/wR74VXtFMcNj2fSDb9HDq+a5zhonnybjESBFBMDOUrfr3J9uTpkacaoGT/IXo51JMEEItGMWU/lTP9WL1wKiPUemup63WTGfD46kihmFHgGpoC4h256emdJ9CzMaSXjaYD7/yAOV6F/JWwxFUKoKIeTrpdYVkmCzFHGzQfVWQIaXLihv7UJqq9jHB7CNPqF6eN6co7lQlJ07lpJZ7cE6oPLhwbYLkMo4wZtsK4nMWbieK6kEeldiIrUyMJ6+jClUC1UM8ASAx6lBg850MKTOVCvXdXnZ3i+PAn8/ZM3fszASkHqP/1v/4XfuZnfqb0olbpaU97Gh599FH8zu/8Dj7pkz5pun/bbbctwSsH6fbsWu7ck8m0bYgptACawXTPKN87GuAjsYjndB89NwbIDlIKsbUTo7gdl9h0g0Kw+6acemneFrJhrU77XoUyjjdgxWWvcQKqhYcbt7rinufdrlManXAIXCGLEh69m55HF2hW/B4+qF0Drofm117F0r7NGZgeJ1CiCScBOhEaQmkHLnwZnvQD8Vo5h6QGaikZ7wSSmDhhO1vXSRhDft2xC0Oy2M/Qm8w3id3c1Za6ieyaV+t6HKfmwRTH7LE4KkHEifvmoh11YV2uZOh37FXIx8NUzCZn+E+buiwqfE63pnFIdenILe31I/1PTy70M149YjvpcsgQHXmz644y5RzsAOExNee7HaQcoH7rt34LP/uzP4snPvGJV77zute9Dtu24a677rpWWfvljstHL8NQjTGfBCoA2XtQO+BdALmoax2S0am8mIBuzd1UdN+KJ7fkmbzBXaG2+tx7Ufu+296OMo6rdjtLU1YTjBlIM+/lNF1KxQR29zI8y+LeLt6dMyzOLwGQ83asL1FANuimEB8C9bAR5c4hPlVerNwMGBpAEU/cUSmAtfQgp2rOFe/VFdQxiq2Dk5SNhSfeWSYClF1JOCRaa1kJFyKPjvwxvKKNZeMhquy0OFPC+02Bmw1dOPvmiOWEEEkHopB5ZKU1ygSS9k4P05jG2Mq3bJbG8wxUcHUfz7snkSBVej7WqxCTK9nGIH/QrbS90qrsyQFa0FqwkdFEwRMuaoWV/ug99sWi3aS2TfTyrWCS01FM3YOxy+6pniHbwGW9GTRP8eYgXRuk3v72t+MNb3hD/H7jG9+I173udbjzzjvxUR/1UfiSL/kS/Nqv/Rpe+cpX4vLyEg8++CAA4M4778TjHvc4PPDAA3jta1+LZz7zmbj99tvxwAMP4MUvfjG+/Mu/HB/+4R9+LVpiMoKBVACW74gNs/3bZmtoNXZhlgh9jFS60AQQ46b909tJ53fjxhTgTa9iEO8GxbZK2ao8loFH7iFyOU2Aufc+0srszcKW+/ORNGn7zZkwnf1L8yxVgW3TsYh5Y2BdZ5Rhi6NQZ34ta1A6gDdrxjh8POBcX4ijFFyv3bbxpIiLPInVlXmp0JEVGUUKGdqXxXszyooZUf8O4X0ouRyqK3n6/Fz7Wi9a+8GAio9gEPdCWE6We+dooirtVzgZZlCTKZZtH7KxcqJOAJUaDeGcOPhyW7gDJ0DZ5skOjBQ7122c28X0rwqc61l0j/Ajygwv5AigDhI5LKO9FuFMKpMnuHj55K9P9CWRDVTDF0xHImhmsrtN0l75q9O1QepXfuVX8MxnPjN++1jRC17wAnzLt3wL/tN/+k8AgL/6V/9qee9nf/Zn8YxnP
AO33XYbfviHfxjf8i3fgocffhgf//Efjxe/+MVlzOnc9Mgjj+BiuwjjtO/zpAWRDfumYz2CjN6UyEUefubCrwhjt18m2KWOk7L7R2lZBigcyvC+22aTcoEdmlOEdx94N9GOuuwUSvR1V6N+Q66TruItAoBoABCHwX2DSu9hdizSqU6eX1Zv3E3D6DieRjENsIecxh5mArnY0rh1w+kCz2ENLoNu8wzJsVVUhh1G2eOl9EcOFHhR1VJtAh/ZttxEeMuF3ZM3zjyLfAiU4tnzFPX8tHAsltfq3enOkqy0hstxmfJU3+19nWcf/81zvqoDBtTnrq5Ae13GzG0RYLug5xkrKb9wYgOwttHf22kPQA+TLso+4qcIcnZtbBFFTkuhg8BSEPxk3jRfuzhj1YHAQXsxIxY8bo5UzWHxbrePO2wfrew0cK/vnHRtkHrGM55x0hO9ykv9jM/4DPziL/7idYtdpkuf2m3dzJ16G4AzasQp1ARsyJ1ZWj+O2Yxr/UwvoXp+VSeqTFlDFM8BVZDDm/NDBDWUqCtm6d0VT9K75mTEia4EogGuypJDGDpv0eVe0bHOd3ug/A7XNU4/tge3fFGVjioQ4ZyCjAC8QD8q04Cs936HIhy1GS+W5ILoqdJTwDxhgbzQalgOAOpUSne+9lacQAJXqVZnlOeywIbMWido7414ZODR+EPX+3NX/660DEArP0tVh5zna+mQd5BCeSCuCLiTVp+Pj8Ejd5hWWWf7kTHmPGkT/6gO+3dkryubM5PoNYFliJ6MHrV/uO7WSTFeab8WchK3F41DIDno5Q13eUzy4PBDLOSwIXSJmAIjEuGNvdCPc3XmRu/dd/muR/Ho9miAE4cERhpCsLlRvxybPAKwaeAjyD7PSqHelWlQqh7NEGQw8vvcjjK1I4GWzkClSEXasz5lnK0oKQmI5yuwqa0+E9GumT3lhaZx4GcQlkazOxuutFF3K71OYEA1ALCwmI6jC3bZR89D6YA+JUPW268BVG8X3XkcksOg9kFGYTr+u6XJfjugc3jEDEDZReMgIwbcEImNF9Wmd5kvmNEypQ7xWpFdvOywPOlZl3ENIsjlrFQU0R4nxzfZyPbri1TIa0BVnlGSOaY1CG75HlN4kFzGhh3YL61nv5Ey0CJwrliEeH3hr1jEZh+TsQL0Vk5BT+TgpCxtdSJTY6wYGg69a5NiMAPVHB6WmS63TWGcVrZzIv3KxCwo9WCni9r//QOkLi9x+eijpYfBmiH2CbWw2SaQPQVij+X5Wt5t/kJJMasnUhMuaoSGl3OG5Lz1nj6VSD5uvc6f3Siq+um+5kVuJCEOVhNNDo6YPULyxIYtSQMYM3t81w17QZx+O0cnn83McxC+9hQZcKJ3RI7IKK+O03VwLb4xzYgLEemsbG+mcy1kYKaGqAYWxANiNzZALiXCzNGL7L0g94TFzm1yYBDMZZf3kYbqnEQCO6oXqHxskAKwQRUjPpIOuPPsEzR8DDJ7+UwpOyYduJdPzQXKiQc1L7KG7/sOwTj7bZMUir5TuYjEScJqv2UzeY+uHOsgEVCEkHriy4XCW33HebdtEFWjd1iE2iOiXUeoLCe4+Ce7GwhA+E7oXmVvHKBZymz8lTCjoSNNrOMay/DY1PrqdKNBarfxGsCNE5lHVdKjwalxAJgLZ+1u52yipYRHWrKVyxHPqxqBcglVoGzOF+ralixP0S4e0MZXhW+LC+fwwOTCq6opiGBh7giFia6AKWUj3UI3glgTJq0XxOvNgAQX1QUgFxvmIMqApYWGSrPxVG2yASmKtrxXqYeB2DPNoghE6UYopfFhCAJ5I+FEJLXe4xUBbMNsuJ1I/2uWQgFq6M+BbeHizJ2wDsqn+RFRqU4HiY0fa8FS420xzCy95B8LT77wGIvmknK7kc7yRS+Y3CuB0xjv3egonVZHzniTuhBXPBR3WpgmcA/Hp8/0TJpzgtHoxYVMGyrMxn/NAe81Ff2MdmCwImIDoFpm7XsFTg1jUWIXzf6NIt4PQGqMstZuqlfbt8gbf2YAdAd2O7RtJ4/GGawtE1fy1sMSEjKSufMSCeRy6xiQbLjibQIxgD0DR2sF/D4Jc9SHZq+t81mkYfHL873nU7+kGgxwAuBeoVVyBr5VJYkE7o2xFp56h8gmss5KJiEJsGJHUxA9AGLCDvMiZKT0xljxJ83NmYMYxzlsG+1O0OmW0rCRt5szSPb8FbBQkVoYNh2VkZXBiAPrghPOj6uST4AII250hPc81YNfvjL7+uwROUL3q7W2ess4nFAEY2LEHjyPHlXkRTZi34DNQG1XqOyVx/yl0cf6niG+nHxTn01vQoHcTeTS24gU94BnrhZLnXHQ6yJ4wM7M9KoHEJ05F6/eDNdJNxukLDFTqxAgG9+T6gCrrvDuBfBPOdaBtYPhHvVMXXqpbfB9k2E47Fp038m4Kdz4pOHJ7hUWrc8aU4HqZOpA7R7+4XvuLU9uVfEWyxtuGO21KrSukJoPX5m09L5mHErvM/OfhoDBlZeWQ8nF6SdrFOHPnUHW7rr4+U7tghyPm+pIMikYxs93GUfKwTAsJldmbDPsY3Vw+RbaMipAiD0gB56kUeJCT2uQyubU2qZFLHJt1iqy4DSP+zS5wUg9F5ekFgAAtpRJREFU17gVo1ssYxr71IvMeN/VJqHxxKuqu9Fq7vQ6mLEtCVnsFNfKFueF6r94NKthYdIAL2r36ux11ckeVJFdclCYyliHt9BdJyh6ZM3e+fv+XLa1TKbkHGcHuOEgFaEHUjofEJUmAACQ27go4tiM2D7f83PlNSa28NMptpaI4UKrijcdm45uuUs0KlBhM7vhJ/pqGixoF0sUr1zZQw+BIwu4qMnk8Uwgvqz1bPAbQEVp0eMYTysbyELsqfKOU2c5R768YqnwCwA9zNdAwuP5xqEMUbryLwaf3WkCyliHHlRwtP8Wzwq2EZIyeVTtTTlkNADKeRp77pkjpEN+xoLBrK/Agct+ZWNRG0j7RBioiVcr/0I1xuJ2aJV3+CSabBP1U3CTyPNR6kSqemk2wA4mFfhu5HaGlW9ZFgzxWZPZhiKbrf3bzUlB2iIG2cNkbcy96/68h93g9sx3Ude53Y8FOOxS34mnO5TFVBzkx23MwHWqmktZOfEKpxsNUhcXgouN5oYC0eC+cSe3+hSX9cYVGb2ZKfVJEnRH65fAPi4DWX7IYOzAbLN6yjobyo8QI0MziHAZRFojkyFRU6l+UumETXShfAxX6qyODGXFwh0LJOMcoebNdcq7HXQnQfpzEmVESIs5QeCdDqdPzqCQzdSuuv6m1gdjz9XPCfN/KITk7c82JzxJ37OxVJTr52NSOwCB2srR0ateH+cQFS35eJkGjDGj1fLAiCb41Gx/KdtBF0YzZaX3nMIoN9mvHrYf+dEG+eHtnHoUQAWEHIYx7jJ5lUVtqWAUtPQYVDTCfxsA3bLdyzohn8jgCqYyjtvZdEzGgkA3FN7ORK/7ESzz2St21mY9xQAuAepE/VtBym0WOjJ4zvvtSrlH/HPAI4Bal060plbGrSs2yol0o0Fq
kzGuk2rPPSKEcKXSFPfOrpWflk+2a3MIF1fr/a6gQlpQBkm3MZsnAUpmYba6TO6zG5LyQlp3taOrUfHbaKAKxh8poCB6Dv3lPjGg0MmPUn4ZKkGTdC8H2WvzkICQUStAlYAXz/NsuSgnixnsU1qzUfnRqlFb1o1uu8sTYyae0Ds86+p4zK7VTdXGHhBGedDfLHuhOvOLKnJIKDx/+2cDYpFqB7gir1xE9SKi7Q4MTb8eIUttchvVliIrbtaklEeySP/OhS+uTb6Msp2uvYyNlqRQqekg2HX/vW1Q3U0kU8jY2Bej0siaSG9mallLASKWrLXtl4l1U8lRL4/QxIxSTqGu6li3n7Vp81aK97XTzQapCzufBzkrzo19DtCmJ7xhGI39sg3Wq0LUTzQ9sGCUmNG9kSblBBkHAcbZNR7qc8DaaiaoDVw8mukJVh7zlGm9E5DGLp6nMTAOM3J+ZYzj0BLNl3g2XJ0gMD723FKgZmFKLJAxe0rUDoMkM2GKmafPXgaYHdHH/GOAurqVez6Vh0eLTU/my2MAfcIEFDnmtBuIwOTSiiiWRfrrh8kH3IEd44iIHdZNANzzj3/tnSVA1XvXNzjVQ6DIWcqKpjGLSUt0LUD7MZTf6Q7V1RG1G2WNHWA2lfoQOZHiR1Yje1QbNrMx49a+72ViSkQWwjFNVtQW1fmGNzvrsY9P0SNLlEozuAY8tlvdSXBz2E8d7MC0XDneaNDp69npRoNU9JRAbV4EYjyTkxHsSZmFnBdYzqG0eGpp+JeehdPSujHuVXEPSuxe5lWNXslyYTTGdbIqHFxWB50ksgNTn3FWFwmK6YfW+k0u3wo0Obn3xuOCrZLeiDaTTTdtMxAH//yE1gyPjTYFBCvLddgJKcnzYI+vhjwLLLViSv7F+E5sWOSWL5ZeXymsGvieYbCUDPmUewsPuSzlVOQqrxWo8osqXVxMox/vVt6l3Nf6Ol0IV5uy0aRZIbbhqxW3u74u6rpg01XG0XvHRfOnF5xGDZlTuJPpob8BYO4Q+5IDATL0HTaq0QCXd27TXMO5XjA/6Cq9dVRQCudWsh0sa4r4JFDVExZ0AqHmsqA0WL8F4peiZH5GfwDADQepsRDPe0p+jRUtlYBnwWybzAOIZDFPrbwGEMKCpcAc0BohvfoZA+QOPnrYL7CyOc/M2700tiyCpJEgcKbLxyrILWOgAuXDdWbjLyXe5xR4lep4VDkOxZ5JIbY/myhSafUyxe5vEN3zGHlzuVctO1HXQpzxhC5HuhKkV2xkx8EMAfcSJmKO0hEOHT1bsqvGzO8Fvk2gytsVscPF47Ar/nMAzp/RxXMr4Ja4n3+kE25sAZJdkkMdEy+ggPhuD5iKpyITSJXOFevrAv27tN9HTRX2xHqovrPLoEfH7jCylWUKAMppv8mITkSTM0U5H69/VpBwPSTAwWgzW3lg+tFrnnrlToBPMhvXFMXhDhMjFIYemRP1xC8rwDfrDQfkgMEt3WiQisTOD4GVewPjt4QADCMoRTlXeZXLKyN9BVkxm4/XQ8XmklLCbj1HbX+lcmxFR9eitHisXLe65h595F6NB2vPLl8BhICcyt07wGh6asKbqM1VKrUpPTNXTlN+d0x37NjEFlhSr8+nyI5Jb1uEWAbA7BA6rZRBY/yU0ltIG8uao5XHVyTPLXbZ8QWh3QBdmZEruhniQJkrkh0Pk9621m21yEFwWtjR6LSd1/PkF0g/OsmWj58GHTP7xENfyHB3OA6SdAClHptobCg8bF/uuHCKyWRHTURrr2m5AwTNxuQeSNTLjTh8R5fIDLLn4lsvN8eire4HHokCsZwhAGq3SRlLmRjERG9oZj/1jPzLzK/sZA+Ci7PRUXzF2GDwnDc/HnfeP0CKQgruAVNII2bEtbfkFNcnADpCrdOUFaPIPScQMDVwWGWbnhOi8b1OuW4jSg3BL/kKULSFiWRvlmgYwObMkDR2RlOMC1DJqQjm/U7Td7wuBFBRz3E/2w8LlEZ4dAoDA0XMtoowYcOHyeAS37MOdWA/QooLxS9ZBcgTS6W1QeiwljUk68RtdGQZCHD8u/G1HFkSnj0/NxiabVCRhXkV4ynRd1qQTjLR6QIIrN1IS05uGtdbz8Le4o/crcPa2DzyIm8roPJszKvR+D7zttIlM638tKBElwOo2DHxYVNnbam/lPoXdjalKL1j5TY8IAaV1tDhEw5X6nB1UBK05qZhe5ElCR0jowu9rVx//+hJkdZoURbnbDWegP+mxbFS3z9ZVmvvo3b3sjb3nDabyCEYEybEzyDyvfp7fYygvQpmGQdjAyoYWx0dERJfpUkJZgOhBBSdMEUNPfhdAUQsjEpAIyyZQfsJRhOQuuHZsccGtUDGsUfIT8KzhgzacAno5kYaEf6I3T3aGp1ada0GJ64Rz6kGKVPGyzixl1jHdTthKPKZ1kZLHnle1HMyGYnz1bid+OTd9OrI89njmqqDwNqIsIiOTzvzjJwP5kn87oZZFtfg8tgcCDOIY03TqNcFRq9+R26Lxm5Wod0y4s4OERbA5MtCLi620FseR1omNyPUoxq0mq1RQMLeADxxYgI/5jMDlDsel3u3++3lqwTsmmlRbwbtgtpDAZHLGioty1f6hIyDdKNBati01boMYpLFWCMZk3OiBN1n8NdTY0MCHxjunrbJ+xD+LRuVu/rpqWWDq/cKlWx6CCgSrLgiJPyMKXXwu0tZ+zl5sbM3nHRpvWa/XdWiF4LKl96hOtU7GfXw9TTaxqUUHNoMkmn8asMWxiyC8NEmIP4nn0IWqKgAaQODMJRaDWDkC+Z78zID5IbceDWSj/RoQwflTwdJEvhhzGyH/N2+8yGQBlJclpTuKX8mCo82WoR8TE5TRpNf0Ru2fwvPiWGVV8Si0qNfmO6ix+b8YQtHRmNWRQdYmxoSZiHbM3SRFteX76SrK1pKjwr0XXL3jNAQkYUchvYEpQH41Lbq+kfyokwD8WXypool06TT66HJWnaw2DTkHzkTjcdcaMGp2cQsfx+lGw1SUw+KfscYh5ABIsPEAJVgo5z1nMgbCCVceJzhsW/HCpDudy1sABPqcfZmfOwBoiVfFpUiECxBKyAqTwXQOSNmDiRQzoY1fvvOGEAqqB7w8kQKY+fHmMAM7ZbGIqtp4LRtuauB7GOadetJsoHwJQppUIYXrJdK2Q6GDtwkK9SSgJSXgGp6yB0Ra7buXBWfSVtzhqxqMpiMGfzQz9aTip3pA4vI1QjrptHjZHFkgJqcCgdI+KdBgdPdgDz4j1avuEDWkXXUdMTbB35a9sWGbddwnMZsv9YF5EkERHftCaSObheb7QCzVd3tTtHCTjQ1Np5y21YH1d8JnhRfIQ/wDD776QS9L8XMFoC3yZpTXVM4iJ51SiCxY0k0C9PdeoJOP9vQqHtTB27m7f2hJxVJy8f4ru4/JZzHtwXj+GylIy8/EjsRRavJWBFIXWybnRvjf5I7TVAdwgPeB1Dte4ZvuCcjQvalKPeK1qykFKLba86YUAaE8buaIZRNCOm8s8CKrNNZd1r8CO/NyCVgMN777DS
1napBCtnHQ5xzqrBd2u14TD/p12iIIXZdD05TrU4zh6vVAMq/h49wKrfimHGILwFqHGOCDPvReyNzJQ86eVSd8LpnYIC2JjjAnSnqrY068P6EvTLmdMXaxA3QHd7HWjkDqbsGWqavapNUol1id/1A9aB91W5lBxhyKENXp14UV6Ywi+47LzqELZAb9d0A3cgn5fC8NMNlFukRpEoC88Wdt9prQigLf695p8MHsK9Qy3MKV3kcpRsPUp3Jk7yU/mblSzLUjI8D2dRdDt9uZmzzFtjLCO+Je1V8vWfl3foI4zRvClqVtXKiffKlhTfY6uJGOGnJ+nvP5tB4CvJhNJ4ugCquc9nLlMwtY437nCn3hmKhasw2TO+Zd+GuGG3mWGATMvJuLl+oyscMSOPsMtTrgbBfwU+tt+HAfsiPfHi0j06ykgBijo33RImgID+OfkA8PwMVAqj8apmokcSUOge4WRMkYBnQYeyiUbZmQnVu3Fw3x9+wgSbIiDB1WZZzjLuvqclTqvrZnBqmnUN4xqzidDjfjT9ihCfYr5LGnzo/7feoY7ioXfTaD51/iiRvxPdKlNpzJx7AafZ2Y3CaeJb1LsMozR4wJkt796p0s0GK7fKBpeNQCvOtj9sUALARVu66lmJLq3K+FaDSO2s9qLZPoEds4IZnt8HovRofLn8IVzPUfoUEqjhsLCWtFzetcFCiy2+sHMfmJV2VGLiKT3qAgFHvHWNXcE0e+24jXfGrE0AWzu91vnm4atcxEUN9MaZPdSb+C6zXMAtd7D9LdSUba6BC9e0yW0JVrR4MkvH4HgbNj63xUFGp+1I59OA70Yd0NqI3grw+yUjPLgAKudbGJ9dA6LiWLWQ2PH1bIpIAQXmv2U9CWE2vgGaMlgaZX5+cSJat8Igr4DlfiuPgJ2nbe8OubFb2ltQtelNTO3oolvH8pMIRH7pHKBYaV82JJKwiDMqPIVWgysw7QJ3biwJuOEix8oR3hqo0k5GW/FxOKlDkgHtdblTyXBM0bvYe0/RnrsmgNb2nq/6GQyYhe6WCB54O0yZw+uaKVG9zTtPVprtRhhkCMYlPj2zOr6t6oZVkPAaMBbEB6+CDFH7nu1SPQpyDVKOFFqhCh4fva1I2bPBtilR3GihvlPtWVOwJeS8mKzKeCG+78jJklQB1cig8vxI+sBIISJ1ffYJP1JONhlD79rAEvyjxdvT4o3YMIkyuP+f5MMhEXX1B9gbZ3CmpY4pOIE8aWSWXu84voTbjCQirIymcM/5f8aRESg8wJmS4HsfklQSpWGy+qxWYO7lM7dhRw3gvcQEoMt1rGm2fCBTA4Rn5My73tQs8OXGcvcsy9c1mzi2Bt+F9+30q3XCQGj2UcaomzSxbPgzizAxSImPPLiAX5mXIRyOLNSH1Bw+4cpiPZwtx4hDBHr0nmtXDR1dzga3c00SiGH8hQdfgi+bnooq6ELa47wYVXEYDU/6qqUtQTZ2i5GGxGGcIABw8vHS+itikCmn1aPWNa7UCgwRTZhvL4VmCuxOzb0MtfRecieBKt/YbDlB8qVHiIMrjbeGHSOQy97BdiGr1C9pPTUvtlGFB4rsBi5hh5jXi3IuKmi75UdvPtzPiGZUjTOuL3gY4qQj1PJiv9GXZjessdWeBrfOKzvLS4f1RHOXp7cBtQmOaDkRj6vmOCxFg28MxSiPPMsJgU6mJsHPxLCgHjzRQr9xDcS4D/lwC1AlQIrlg7qhqbohAOXCngW+ciUfLdLNBymfNYRs2ZN+LxzQeAoHEaEA/b6o4SQA2U6axxgHUMiQQ7B2vFJ4HYC/G9+2CT96UhQ6495WaH57qoRJe1ezmNbpT5sagIkfmFsKvlU5iQcwM6iRJZhWgYBGccq+9I4Btw5LrSYBVlb33oaRw6QCoDFAZBn6L6NGVrlpzUkYhIxSjsseaHCPBZg3aoL9dW05KWCVm2+LZaJYNFBoWO0LmwGgGT3RZ/BgP9YJ5kfSBseYiLO9ocwesyajOWTnIZW/BDZsX70KJmJGpukNEx1ZXAVBj3U2fReZfQhZPIz9iHKbIODCJsnoW5F5Q6KqDZExU0Qzv7fterkMTjLFt2HWH7FscWx86YsUGCPa6yPxzskHrynvfMZ00z87louSjxJcEYtAzqrC5LvPSjV42qNf1WNPNBykRWt3N3kUyL8JssU+eeRuNvzHo6kdcHKSl79HLcqvTJkyUfu6exhdA9Y6v8hC5UKZNgVibER9D6nhQOvljQmwCywqd34EcXFAqtnlL9BlFNBtberukA3OEgMZvAqRYMQeguMb5Kal1soSXIcycQw6OR6kS1mPyGL6oX6u7WqynlBwY8RVAocpNbJ0lNHA/NbWuM4PpgTklkAwri7vREeOqTkH0AMnmu07kJxe/cFiYQgK1VmKrPe1eckU4L26RzvTney8yesmxiwiPUU25B2ujzpS3Rp17OB7Ri6oTnixPP6RQx/flFPEGULOjVcFaSLen5ICkOnqlq6pibcuirZx2O9alTy/PMCLKAaYZInV9WwPVVT6kpxsNUtvFxeitmID4UduxSSKHd6T1oLqx0vT8nL8rL1l908gu5Ax+7WDDYfC2fIYThQhcOzLWfz3vowqXC1Ob7SRYSmbuPTb2wtvVdnrAht291h3GYy4kyxr5VLAqoNxIiGeiqjlFNvSj/Y3Lpiy6p/OB3XbzwFCqzTIWSSNr2hVgtOhxR96xn9wO2TwEM8KBuyAmWCB2FkhZSRNzAFSNbz0sLLxWZ+Muac2sT6g5TEI0zrbC8qqfoE/G+VPV4TadnQ6qQjgnmbk7Q6p+6KCEk1N6M1T4KhS1rL5Q+5PTpNAlreHUOcgE3VbeziA0etbZg6IlI2w+HFQUtqxjT7sAdo6Yo1TZprCp29exEq0MwpI0egnsR4nFKADL82V+lbpUN+U66UaDlAOBe7m7qu3l5g/kc9OiPPIuYmAXta2EvCq7PHopqijHsyOBMMpaLAgss/rc8zJ5DS+M1ylF3vEte2RTovfYGE+ZKP1bR/EkeiNGs5/oumePdYC0NDTMIpyLCVAuwFx386scRzAMQ9hFByXy6tcGyRrbDcKuA0A2Gceke49CbBYZHdXiYSQP/UzHY8TjeXGMyRhw20xDD12FYQp2aFRQzRNdDtJ3kGo9qNEcbKApFFz4MMsN3D0xXjKY8usnOy/C7YqGRske7qT2z6uSO1LMbt33YsBrb6a+2wlXp40uF/pdxq3ACah0cMtnJJbuo4J6S7nolnf4QGsfYTdRXc7tPpfhOuUWnyx/w8lsR8ztydmDy2ZnZ2obtmlXA8kg0/UJRSamko+O4D1TPm40SG3bhu3iImLAG6TyVyp4ACAjX5CnZswN4LqvSE1sPaxUSkkDzyDFYRuwgDlQaY1vk7ALCxbVaerAd4+vuDgOxFlf4eeYZQy2BkyyWe9qvwTkUCITlOIzrVuK/yzUHLIopncCqGaMvJ3McAwm79AYlHKaxr1hvLL9IwwiRBO10yA1740o8GZbLiE8/DCyihHOoRqHESVnpzIcKR9tKx7wHzOkM6qlws9A/hnouU
fMDgKRn2S6XLR77BcdPT+DVZc5Jj7pkUZbvNv0pxBubZUhp7lY8QF/sgl83+VqRGSazJXd5WfdTZuwtsCJRd4Ww4EKh43lkCpQpuH0dnQdmPgEyrTmN+cdjEtCz030OguI50tB4u4HnpVuNEjJxWbhEI94kpDwP1KFxptsGC2aRFkUVOo1MkTs2fgL0TsjoPLjCabdJSAYW0qYAaNBV5/dV6xagOCWvxctHDFj+y7blro7LlaQlb4rRAqqbGNTV90Vug1h833SEhRqlRpLjqVQq6GM7z6LcbU56oHSZJsb7buFKgXjVNuw84RaDZzEr0mOw219zxaR2EDWQ8tu+J2+HMhnD3dPgPAQUVHaQYNPrvETm5meudKE6B0ASXSq/ebJOE6f8+7YIm3Us0uwJD40nyOclGTbcWIcKV+uNl9LgPK6k+PD0QueOJGzJ0GOi9318QII+rb1EZrXMVErAWtHTVwuCu+iPdyIh2O4xVinWNGT/9wBis/FK80YiBF6z2HTk+k6ANVKa8GV5tU8hoxx00EKCyWuTtHsLc2PnkzU1IdUAMizocjgZSiBDKQLu//XvDKgemQJUBK9KlkYsJj0YcZIulAGQI08l+BF/IqynX6xacGCzL97SG5fRArPfMKGMzQ4UAypG/s6EF3fOUilx9iuK3IijNB1CDL8h/FdtjEbDhReYnkS2iHdm1Lzq/hUa83volu2Z4BJNkDvuabcnJa6dGSIN2yvCjDROwGsOr0TNZVszx4FcPmIsc4ewxTk+5zXKhWgKJdq/TXLSPAnsE0vgchYz0SdxqK5nnyDHI24gMZD+h73WhnsAB6G07x6EW5H6irxJpqM25Yco7znL/XGHddy4sjKAVqR6PXqD0uqElUD7XvBS3riXMi60SCFotDzdijl5yFHZPmVixhMzm1eqpdk38uYVzU8p4jJkIGGgQYZIM+vApOkMJe0gFQWRgKoACev5FJosx7KymOz3lZeOEenRg41zwmgXMknxUd8niPMHu4rkxz0cvArtqRJqkJZqM3EjqsXsfFN4YkLXMGsV57yyghhH5ptqjv1qPwechYlOwN87SxPKotsoNR4bHxaAlRXHTp2ZB7UZ3bQfonMogMwKBmxMafKngRoncd9pkH+4TEM2vo06Uab4Kh+ipyq7+WMr/u+F5k9cqLmepBsIz8Bn8Ax5Db3z5vXfWYPiupdrjETuJYDAGPHj0Odr+Sep3xUzMm0As6r040GqexdAEWj2QADOEJtwUo4W1LE4HiNczegYlAyYz4UM73FYtO101gIqw3PAGHXlmNSM2nHCk8hADidrPSueNSz6WUJ/buqw4pfAtvQwzJK46pUft7TvFDqNtXf31BkLxLOZp8mSxkkUsNDfAKfVSaIlav7PsKmYVfXjtAExjqgUHefYGHXjJ9CjRx8OuxBLZjfb2vjVfTe6uST6lcs8hKbr8DASfVeSyxJAjkp2fPnR/1ZyRdaPnyNja56fQDa0NZllXgJxOzOTj/TV+lu9JAMMiF1AkfnRkrBBHwtOuBba8Vs1nAIgB06TqRGmoKOn/xdobkr1iQLtd57yDsSqDjT4iQ3Gzo1/sh/ZKPBTUWaE9e7OrS3cJJOpDM3S8/0mte8Bl/0RV+Ee+65ByKCV7ziFeX+V3zFV0ye/3Oe85zyzB//8R/j+c9/Pu644w484QlPwAtf+EK8/e1vvy4plsyCh1L55TRGLtxpcNMLOtXpTC++IAU8zBizsbbq/fawTQyCHxVyUKW0XWS8lsZi7fKUuPuqTC57wZswCAweJF9FlFfVI2NALsRkiN14Mm5zj+AwsV0h7OGwaTlbyY5A2eOPF2D2Z1sYtjgXjQzxiTL5t9nYEk8l9+e2uN+OhXCni5lVbMgVDEnEn64Vg3rklLkatR7UWeMY/r5/tYpkflVP6jE2G0Rywoj/p3EidJ1Q5MfYRDu2dj7SB9bNortFhzsgaMonAX/XIaeZ7UN0L+O1RrdNW499On0K+yR77LAtMMTLYD4VOU/++AGVrONu6MKJAYvQsVOU5eY7/X2lZ1Z/56Rr96Te8Y534KlPfSq+8iu/Es997nOXzzznOc/B933f98Xv2267rdx//vOfjze/+c141atehUceeQR//+//fXz1V381fvAHf/C65BynBkoBVAAA95wxehNonhbOBHlWZOqi4zrKTRgbn+6KTB6H0ks0CqXD8c+7ZOWcnlNAycmFdU/jve8ertIaLrqyYu2n5md45pq7GtQxtSuEeHa6azXUq2uFmoZH/8qVJ2yJArtgV4Hsgu0CsPnsgI695fjMqtm97fS5XBiRu0J8bZWhegm7hXHsmRqRcEOCeK8oegdRMmKMdyFVVEyIfg/xkci4nLExdBqY1KljRM5VfiWB9wcl8+8pelBRNlIOGXvdoz/q7YYTUJ0+flrp39qR0CVtk7Gw/KdX7W/faeIRFapQbBcSZ7LFhrwXNeQ3Nbdyu4xPPkMswsretio2sWg4UWVyxqLaZyWdTdbEnzP9nFW6Nkjde++9uPfee08+c9ttt+Huu+9e3vvN3/xN/NRP/RR++Zd/GZ/1WZ8FAPj//r//D3/jb/wN/It/8S9wzz33XIMaFvSRlFosvRfNs4XcHZHslo6s3DJSXl4ElQbANjedlSG8R2TGkzFultcHbENYTIHVWtb7Tf5LnX7SDp+YUI0AV+40YNYQBmrPo3t0IchW9lIqK4IMtnpXJ0FBPFSAuqvBWJEvkIgRZCmS2SawX+lWKOXBxkZCTmKgvfOEjFPMaCw+QLbpksciuSktcSbqyheFP8zQuOZP9LCgan2mlB/Vp2Mz/PUGROHL+PWsD3GmgVWpKvw031ohHq/l35S/CrUQyxkC2EtvKkBy1qf+u05KSZAsegpqCgONyJlAuYPikWyi0T9FcCa6rfmkcGXo9YGF77wpeRMwAsgTymk3naUTqH5Dy6Vep6Cbat9lwYVeaReYUtSZaHjtcN856ed+7udw11134ZM+6ZPwtV/7tfijP/qjuPfAAw/gCU94QgAUADzrWc/Ctm147Wtfu8zv4YcfxkMPPVT+TiZvLA/jXCp0v8T+6CX2S/t79BL75Y7LuLZjf/QSermP776C/NI+La+RPxsMFtFmbRaphP9coZZHV1Poh+vESspbsNgzXE4p7zRZVh0HonGQ3vhz/tHheitDeTJl+R1IN6rrtsmYYHch2C4yDDPWahnL7Hu/D/9srL2ixvHRldxlYN8HH/Ryx6XLSVzL50C9zZX2ZQ8FZWH5BHqdPsqvhLKuEzMhsRT+TjzdLpy3FbgKNV5cKz57CcRWcgQCHDZunAYmC7JjwkmEwmqYNha+65zRMpy3bdi2C9rRI/9QlokkfWv9mgFGHACoahk2RjvQtLdZY+qijPIovRKyS+VVW6GFbx4V0X3PMDe161E6V9Pj+VIVWYrtuSD1bp848ZznPAfPfe5z8fEf//H47d/+bfzjf/yPce+99+KBBx7AxcUFHnzwQdx1112ViA/4ANx555148MEHl3nef//9+NZv/dbpehgp7gn4n30p4w2+ZsXfd08+P
CupngyHAM2jVXWl01wRDyQNRdCJ1mUvKo2XG4ZtGzPPdvXtl7xOrJDD5VL48+NSFj0ynGdYyYE1oK9NimJH5/CajQhjAXWQihMpxpLSuwTqFoAe5hOMNWU66ukb/IqoedVSATiOtibLrkwMaTK/1x6bNW94fOFD74DqbqFcM3TmHYasWM9ARevYpPeUmP8eBiq9cJ3oqPhfGicNDxB0ssIXESPnlcsrckGYIVQfpjyAKcrTpaHJQMR4eyMZLOE1Lr8dccJO2NyDIh3Q1rCg8SQDWtfbHDNsPSrXh9YEQUsfv2HWytEP4wuSRzuHyicQIp0MgNN6v2gX0OXF+cbOTPIQYRsUsNOtzfHzeu85maK3RVZohShXe4LqZVcxHp99adlBereD1Jd+6ZfG9yc/+cl4ylOegk/8xE/Ez/3cz+HzP//zH1OeL33pS/GSl7wkfj/00EN40pOeVF3mzoUi1OmJecNF028hzalQADzAGvLnXqAqYotvmrLiM7giXyvE10b0sBiHwMTL32RMfdsA2ZudWigLz2KjOeKVecQj9bLQHnOiQyZriM+vpcRr6I0b/bSHkrcZwJy3SkDlBtNCRKMuPhNu3GOwZnqF8h+ZJQVHzqgrDNa386rSPneisCMQ00GhfNzoqYzFzh42zhlUldlz+BegeeyV2E5j9GDze/au1vUuYJjMogdAxrw5Oh0oi/ytuVczrjTUCUQNpHveBaBc9NLoTqVRtun8paD0CRNMl0OAAsWOcGQhfqO1KdkNpX+KDsFN0gneHdr7QV0JyaPmXdjYyyCcCXfAnD63HerlFxNKgOfyxSQB6GvkooPc6rIwzdP1U+nPfAr6J3zCJ+AjPuIj8IY3vAGf//mfj7vvvht/8Ad/UJ559NFH8cd//MeH41i33XbbNPkCQG5m6sree1QGTjyLKwTf89hT2NyDLIrq13YhZR6GaAOGcbrwLfhlhA1UbcAdh8I3N1B1p+poVNZpyqe9fVjQefGvJkQVqEqhjtcs3IIc8D3qtSExRUjRpzroqPtGtEz5CD+cyiw+KywKc/hsWCdezsQFABjni6lA9kvLq3q9Q1ZyZpjaMRM5Y2+z9XMHhLO3dFVqvYroTfD3x5jOFA16wWlKvy0Vp82cK920U5mSUTQkqgZ33YZOv4jbg3TCuG0GmGyjF956c+Jyay8q6MBCAshSd1A+5PclUCHf33UpvxP9jOFFqdIrmvIpCrvOey7aZWlQLELryaiMPr51VAaI7u6CzKSS0r23gNTv/d7v4Y/+6I/wUR/1UQCApz/96XjrW9+KX/3VX8VnfuZnAgB+5md+Bvu+42lPe9q18g4v1r1SoCq+ey/xWQddR0ok8W3ti/H1DURJIG23nbEGZttSwLfMwzOJHpXRlt5I94SW1vJkOoGBkSUbkfrsacukEzGnpYqxMGZYKXKgPopdANJEnCmP5mDsUsnF6ZKqrJ1sA6qJlnreRtSvOag1M7YdMBmEb4Fl8mF7+8EWB3tIk01sALUC3qMOnruccG/K/pkN+CJxr7W5MbUnntWagOog+wxb+6QXMlDUU+ljrxxAFKIj6hlAUK3hkXNSa8U3s03yAghM8l4a5ZTbUnf1cLMWu1J7hgSMTq/UbBZcZNIoadQvMxn2RLqtWBUgxScLaVu2LU1KcgYwMLOMhQ3lMt3JLKBNH61M3m3DgerPrCf19re/HW94wxvi9xvf+Ea87nWvw5133ok777wT3/qt34rnPe95uPvuu/Hbv/3b+Ef/6B/hL/2lv4RnP/vZAIBP+ZRPwXOe8xx81Vd9Fb73e78XjzzyCF70ohfhS7/0S685sw9hFAKouoz5p9KgZWtgBZDB2XQJoi0kY7Ue20aEfUZoy0emxPa2i+fC1Ry0BlC1kELEjq3kkwaoJ7fRMFkjEC6z/q7qUZGAlYjM6vEjfDOjWGa8TciQmfA4RbKeHo42OjZYdiPui/pgO7273I7mtAec2dL4z6p4ATbvQSMN1nax2cLdHWPeWutRSYZFlxmvDGYzIkwDAEyTqARrmks5OG7PlsL3in0LXUTSaJcelOS14/Kz7UoPcUlA0syiwj2Qlcj62HOZRHRKiOlj6ql4wQuAclpYl69sgEMyUjI6aOVnlYEpzH5Y9BAUthPsQHvvb3zXs6pR6tOB608xsw94DCD1K7/yK3jmM58Zv32s6AUveAFe9rKX4b/9t/+GH/iBH8Bb3/pW3HPPPfiCL/gC/LN/9s9KuO4//If/gBe96EX4/M//fGzbhuc973n47u/+7uuSEqE+7vqwV8oDhwDLXeWQb3WkpTXI8zSvYXi+dvqvCIANm/rSBgv52cmw+77nGU1bekLRo+MTPPecgcNhnSSnGVdPCwNTLlGYoPKNPTnqgbZS2LSz95r5rIBD8p5xNXPi8A/9Jk+3TzZQZPjueBq9jTM6X8VHhXfKJWmr9NN3r2xQW7097ul4ds6/fd9jQ2G1CR+77jZOhbHeRduEiWVVFmBEjsfcYwx/OQbDIfmuIJctdLum/LqOtTRa5qgvzD0BAx9Xw+1Xe1Tj4ZITG/0AJTfKBNu2lpEjAi4+jSzKOnubnn8PW0vQc6otmhNg7CjhvQgjphHWSwfNnIjlVmnSFJIjP05+2AbEvOuMhPS3tX3W6mQv54oeiwFSyc1ZpbPMeDneLkEj6jUqfsJ7m7/2Z9eTesYznnHS0/8v/+W/XJnHnXfe+W5auFs0JoGKGeuyGsYfE9PVuBm6yff8n/BOtBijfdux7ZbHto0CTMiiF0FeStDg39uR8QxQEWqg2i5FVV3gDRZCoYhTLcw2xdIrNpdU2HtmCqFdZFlmevkgdpwdRHlozpDrg9xZjl0xRXMDVMbyOrZ4W/J3nektRrwDlF1SVTMoFC7Zd3t6g/oiDwcoLwgl8wWtBFQt7DJ+FBei8s3roCnbYXSKABIV/g9HFQqisWNjX7lXQk5H6bUIP9fSgR1hl8Lb6Er563JjL/AYZZUb1BBayUvDS+u9e56A4Xd62BCaz/FQRIAQ0eA/Yk6pjxORkzzpkfYv/EwCoz9Se/ATE6rPwGJOtojfd8dHXLaM2nQmZlCe7R9vuHs63ei9+yK5fWaPTFczbGYFPZ2pJ8mTc0VwKekRC3uTqsNe2aB5dKdZswmQXAB2266nHFFR6jJTdESxOa5kSBDrU3qvhVOCQBetc/lFrzAx8b1N/x0bxNU1Q8uMGp3tS/jo5hTsCshm++URAcohhxVyerbSHBP+vKLOvNiXe8yjJ32BsBiLvCY5HV+iCabxEia4/CZjqMIbJcb7bDBqFZXkuVs3YtoibMYz93JZxWLSSKM163QVk69IDO6bYN81Tm7eBbjQi9RHdv3HSwvC6FljVMouTWsPXRL/GGORGFi3icCPpdptv75Job1NxpHYgMg4VFNslxPQoZC93q25JrXbilnMd1pGgUWazy9D3P7bvSCrjwOkNm+CoxW8Ae6yh3aQbjxIsZLFphIkBCEHZ4NTS4vG8jIDUPbdHKAtWjvGR+yFOMaB6Emgyu/8OwhAeh+98n2aLdC8vekaeYcCynthBOPS
Y+dd0RoKj5TBZ/8MXrtXuVAUNwia3/3fGA8DhRXpbj5lV2m8zPe7iMLCI0bxkM/hxGhDWYAMZToerMZV6dOthX8cgJSDwWGSfDfsAsmW0oONO2lRoo0A3keHeVx6x1PbFuaYToz8PYzOBo8NX5Ab7URZ2T9q+YLyzWPQEeO0Qx+9F9BCud0+CE0MocLLVHZ2+pxsE8wYi9utLONHSFmnu1SU6afdWNb+TTJG6+f46m/P/Mu6Z/nR4z4qaAFuw77VduUXJufrmulGg1Q3QaBPFAHEMdNbXstUDLeG0dqxm+c5FjZtAHYZ62X8yIjutRUHQkfvaXdw8tXi8RQD1In6B1AhV/XzwXl02uvRIHali+zrsmzyNA9V4EAtjJ7s2XWpJ6NsY0pF5NX+EUGZnUcG0YEqtbr4lggjUIpO7XYDluEaILdpqmtWSi+QAEfMUI52HTT79Gdmap3mO1qBe/ynAIr5O/dGDXL6WEbPg2VTK6f4GWZlHFdj6wW7EWd6ZrLsuuoYOwzrvqO0TwOqk0nrdzb+7kCq+IJr77FmL6/3XIPkFl0oAEXjbV53wBbX72YHREa4V/2aJn0r0ndT3ehR2f04bPOEP8IN53O2gh8uu5jaqxBySsz4+U5DyJBVzmlF8raOs14v3WiQikRMC7vng8h+zYQHwMSpY7PdrsSlYZg393Z2DSVwkGGFX+bOCqhprPL6UWuSggniFFfeUbvsrO3XYgGylDqnwVUDYE3Qcbp2kuDIwoyJwHqJB5S6vd1tGvau04mjAXnqE0h8h4u9GCoBYikA55HjAwaYXm+lN31xtJYSD9ncHUL2Srul6JM9jpKXz8YxuK/EfzKapXmI3mwDhNBzs2QPVECt3alZ0wiUcsteigI7BmM8FKHsBUAZcVlfqn/x8DUueoG2Ro3l88DnWNDKAqNjf+AxmWUbPA6vn+SAd1AvTqIZdQekMsU+HL4kKHoWsbGwfx87cOyA7WRybK2VAUPtn51kcihC4UFvzoD6aP5sk+OOt075lAxPvMuRn9I8RGuKNenfGel9A6QsZcOIhREkHW5xz5Kf9NSsUbs0Pe6PkIc7gMm65VcNCJIlYFDQ6YHFiy511BOpRz3k37b5YlJJI+Leobb6h2IswOogxVTz8K6ZN842pfVnGprGoTYGqAGKeSw3mA7XUpvuHQ6c37PPdFK8ACkba/KbwfnCDpmeK5ZvxYvGl/lmWBdSVqV6WhOwV8/k8mfLfJpc0k8cLmlhzQ5SMf7MC5L7eQo20ZKNk/8KYVJr1nyWe5f+TALASnWTVkl2A8X5MwZjHtjvvdmUizAXJlfeg1oBsgY/BqFqoF4iHdKdNJnrwz6U9cj9QkRmSp0rSETO0nSEUjpKs0PmZNWf2Z6F9quS2caJ5uI8nU7vMyA1hGkYbFWBXIwD7PRiuDE7bApe887Oynhlq5ANXJ9PCcwBZH7A0ECrkJ3XD87MEohGr2G7sD/7zr0of36VEi/bRJPu1V8nkUDKNnZukF3HYW6gIyvMy8xQgO+zuBcavObqYb59R4QNR+WQAI6Y+n+BC6js4xDhAEAzIAUMrvbsTt6V2iZxbdn21Qjn0Q078fqgtFrNaT1SAYXeDWRaw90/USs2gP5VaZswIScFTb6o3rzcwfNL56aBnn31n3vrUXp0UCivQ/JdpvYR8hw9dLXJDClT+ezoubOTxPUhtV4oPSVarOnOUjpEdrCm+Pi5tYXbpIUfUAEXk0xNva6JHvq6INvFod87Aq1TVf+zSjcepNw+jU1i8/hvEYlehG8MqruHRNqgqedVMs6b3JDcdrxOIr8jjQY/5wVpGkzAhaxKxEoO9Oi+pGdX6KEtaYpylYoTEDAwRchjNt9TiC34j2LQBt/MuOwC3TROpY3w4abjzCYnZZ9B0untbaT+kvfMumfrhG+2lk09zKPp2WkDrClx7bPkzv9wt6nt3aiFkxLGh5yUtmN1eJ2dArbzkl5tkTWhxpl6Cotqrb73pNTrwyhXo7ENtNTPZiN5554d0ZvZkmOggLqxzmKn3iYDdKwpOiIbORvO89B9hPvS6DOnq4OWbaSJ5cU6azRGl5360ydf6ORI+GqVIffhukztoeoES1Nfth/OykWLk3gCzZng+ls7hf+SRUzvLnyL89MZDkZPNxuk2Di4oXIjYV72duGNqTku5Vxu03MX4/D2LilHXCej1CYClLi1B6SB6pUijZN/H9kuWm+WXbNJWYbX2UN8K0++KFSEPwgcDgykJLMLb3ww1/kflpEMm6pi29Q2zB03d+xjWGOXHNhXzD2oOPthYocVmethmK7wJmLW98XY/VmzF+cec86e6iUwF/jarOSCGnIdMiGLYy9SAJyGPepcDTKDLY+59UYoPalKVBqX8g63cj7QW56lIYHKOK/7kGtff6SNLql0RRhSM7/oORB3fclgXDMDXh7yLclwOrk47rttDywm5xtiWUDJmGQCmr1aIb4lEc6wrjEEHPAJNzyZAjSpQaz9QdPE55rxwt4h6ukkZF1dfpKi0oEl/eCecJBsSq7Q6K1yr85fnsN8cyvMkSOASS7m88yDom42SHFyJmx27AMEO3wAHdigwK7YJ4U8zgtgpguNyY6LY8NKickKEW6Lc2pkajA0AxkGedw8pqfYzQShvnYjX3B3p9YnexCI8Aa8f0FumVA2bCjmnlRuTjn1SFyfzSPfhinCaJuknXnTe1A9dUxMpA3CKn0YvbjN1q2NffU4/9EW4UVmCYX1PP7GPBVXtnCOpLS7uKyUalKP0ia1KTO54KGNOXGvxO4tZ2k1bomQTTu2Le21fGB+dFTenZoKnP7JsnlIXOywsl9q9Ajq+NFUbJB2ddgpmRj6tgGqtn5xz3YbMuchvqoHzNyobQgfMbQwauHkiEBGlx4+JR2q2GSA5ljjl2BTHJrdAcSMF+0+ru2ZeEeSRJVxXhj5czWRGoaIn+MJnEoLn+kx9bxw00GKKu0eQ4gmheFkExuoHzOUYk0Oc07oz36Pdko1HBs95sPsPeeBhRuNFbAX2UJN42LxgkpdIIjNbYeVzPsc3mnGYB7IbaHNZdlOwPgcQpoGeR4cb9cMiPIFaht1I48wDiPtuWlvIe4YoHp+ow7Dcq2UL9aliAyjhLFIcqwVsPb0KcKgLYTcrnSAt/JLL8WBpAGuIJtM6N3wvd0gw3t2TLe/R21qGXZgylBazSPakdujOyvF80+5Kzbaabb3xZEiehcM6j3S4OF1ifI0PlPua5jtOKWPkAB0Ff76ONqoE5WT6tWYwn/1jlB9lflHbcrPZ7LC4kiaYYeScJuyvRM3dc7X13mVi47n4XhqZDtMlXFM3dHySyuOSTTVGqNmOzA1GfslV3gSR+PkPd1skFqmVOxt20bIBzrGozYBdBuGm85Eqm+6Ea7XwjIJLIQ3jujwkz23D6iTFmrfmtIVu0tMxCCFi42hyFiHMXur1atb6j0ZBJ711B+O2HMjawIt33B0d0OvCylPA4XdJrd0Rhc+nAIqchd1KP9yAL/UxZYNbMOT3rGPMJACwIi3eI/K84QAYRbcOXGwZdLpsxe
[... base64-encoded PNG data elided: inline matplotlib figure output from the notebook ...]",
+      "text/plain": [
+       "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "plt.imshow(img_list[1].squeeze().permute(1, 2, 0))" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "dataset: stl10\n", + "Files already downloaded and verified\n" + ] + } + ], + "source": [ + "from data_aug.contrastive_learning_dataset import ContrastiveLearningDataset\n", + "import torch\n", + "\n", + "\n", + "dataset = ContrastiveLearningDataset('./datasets')\n", + "\n", + "train_dataset = dataset.get_dataset('stl10', n_views=2, mode='train', img_size=224)\n", + "\n", + "train_loader = torch.utils.data.DataLoader(\n", + " train_dataset, batch_size=1, shuffle=True, pin_memory=True, drop_last=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "metadata": {}, + "outputs": [], + "source": [ + "img_list = []\n", + "skip_cnt = 123\n", + "for images, _ in train_loader:\n", + " img_list = images\n", + " skip_cnt -= 1\n", + " if skip_cnt == 0:\n", + " break" + ] + }, + { + "cell_type": "code", + "execution_count": 37, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 37, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAakAAAGhCAYAAADbf0s2AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOz9Tahua3bWjf/G/THn86y19z6nqvJRFVOvii8I4tcfjQGFEFAUBEGwIdhRG2ojCWh1NOIHsRPspaFoTxsasBMRbKRhwIggCBEJIhFM431FSUxi6pyz11rPnPd9j/FvjHHP+eyqSlL6xlSV2fPwnL322ms9z/y8rzGucY1riJkZ77f32/vt/fZ+e799HW7pa70D77f32/vt/fZ+e7/9Utt7kHq/vd/eb++399vX7fYepN5v77f32/vt/fZ1u70Hqffb++399n57v33dbu9B6v32fnu/vd/eb1+323uQer+9395v77f329ft9h6k3m/vt/fb++399nW7vQep99v77f32fnu/fd1u70Hq/fZ+e7+9395vX7fbe5B6v73f3m/vt/fb1+32NQOpv/t3/y6/6Tf9Ji6XC9/5nd/Jv/23//ZrtSvvt/fb++399n77Ot2+JiD1T/7JP+ELX/gCf/Nv/k3+3b/7d/yu3/W7+CN/5I/w3//7f/9a7M777f32fnu/vd++Tjf5WhjMfud3fiff8R3fwd/5O38HAFXl85//PN/3fd/HX/krf+VX/H1V5b/9t//G69evEZH/3bv7fnu/vd/eb++3X+XNzPjkk0/4tm/7NlL6pfOl8mu4TwDs+85P/MRP8P3f//3H91JK/KE/9If4N//m33zF39m2jW3bjr//1//6X/ltv+23/W/f1/fb++399n57v/3v3f7Lf/kvfPu3f/sv+e+/5iD18z//84wx+NZv/dZ3vv+t3/qt/NRP/dRX/J0f/MEf5Ad+4Ae+7Pv/z//7k7x+/cA+nmhj4+npY/a2sd9ubL3TddBHQ0enj04fN5SO0SArqQzS2pE0SOtOWjak7EgdSBIkrZBWhEq2R0QLolesV2xkWt8Yo9PbRm8dVcV2QYegPaEqgHi2Fwnf0J1hnTZujD7Q3unNUDVGMxAQAUlCSkJOmVwSqSSkgCR/+c8ZkufPQspAMsgdRP1POmqD29udfutsT52xAwZLuVLrQl1WUs2kIuQH/K4ooNYwG5g2hu6oNRovqHWUxiKvqPmBh/rN5LySygO9bvTSeKlvURSANCpJC8UWPw/p3LeNjcbOMy888cJmOy+8sGtnHzu3rdH2xvMnL+y3nb41imWKVB7ya5a8sqQLdVzJWihtoYxK1kLWhWSFbAvXdGVJK6/qK0qqrLIicVEMpbXGy8sTT0+fsN2e+eTtRwhCToWaKiC0fef5+Zm3bz/mF3/xF3h+euYXf/FjXp5vPL/daPsgpczD4yseXz/y6vUj3/IbP+TNN1341v/7Qz743MqrTy9sY2NvjeenZ26fNPqTsv0ctE+E7X8IYxtoN3rfUB0Ma/Q+GGOw6Q4rpAdh+WylvEmUz2Xqg1AfYHkFpQqPjytrvXBZrjyUR3IqJGDEf52OoX6f4GTKQqGQWVmoVBKZjWc6Oy88sbGz07mx0xjsKAWjAq9IVBILGaWiFAYLRsYsAyAmyEhcSFwk8WFKLCJcSSRJfr4pJAqFC5VHCiuJh7hSHdgwGi2OYEfjO0pjQ+k0dpSBxh2oGAPjxsZO44mn+P3BQmWh8MiVSxz5h1xYqTyykLmQWUhcwQpIJXFXJzEwgUTCn3YwBoMXlA2lAQqWSVJJ+H3nZ68z2FB2DGUcK5uQyAgZoSCWQISPuHGj84s01BQR44PY/wdWKhcqF1ZekalkVhKxWACJTGUhswILwhUhxZXLCAkhx3+JHN9LFF/HEE7aTVE6GveSxhEMFMPiOma/o6yQJMU5kmN/Pv74E/6v/+v/x+vXr7/iuj+3X3OQ+l/Zvv/7v58vfOELx98//vhjPv/5z/Pmzad58/qRpitdN5Yls+8bt/2F277Re2NvG73vtC5svTHUUBm+uBehrIlUlHJJpCWT6kJaFElClgWRBZGC6ApWYSyYFmwk2lDGEEY3xsiYKtYLpoL1jCqoCWaGmae3YyTMBn0kdCjaldYUVRhNUTN/HwwwB6oq5CLkVUhZSAuQDUlGqg5aKUsAq4MWKIj4Q6pASrQlQRb6bpgK65qoNbOsh
bJWB6mrQfGXKWAZG4JaYmihGQ5SVqnyQE0PXNIDSS7kfGXPiZQyI/kNrMrxwCVNzKfaNKGSgQyWyVSSDrIKpSnaE71BvoF14fICtS2MNihWyJK5lis1LRRZyD0hKsgOYv4qyR+TRVaWtLLkhSUv1FRZ8oqZYmZoN19AxR+jJJmUCiKe5UtJiAhJEtkSxTJLz2gpPFCRxZAq9KaklHl8deXV6yuvXl/44FseePPNj3z6N3zIp771kcdPX9h5YRsby8vCy8c7+1NH6EhR2m3QdTB00LQzRqO1nX1rjNHZeyc1IVui3jKyZh50YSGxFOFygXpJPLypLMvCuhQuJR8goAjDEsOS32NpIKIkLOAhsR5LFlQyncLCyk6iM3gisTO4MSKeER4odyCVURLNMiYJzBeohFA0s4qwAlcRKv61iJEMkigpwG++U2bEsqbzqcDvKv9/wqgYjcEAMgOHXsHu/lspdODKSqfQGaysVAqPscwvFF6xHPDkn2RIQIjGQm2xNxLLeolFvZIDfIwBcRSdiM5IGCCUOEKoEAGDHnsKhRrvvCJWEDJVGhuDC+2A3wcSBaGSKCxkKhcWEjXeAybAOEhVCheSLSR5jPNX42qn+Hs6AEbiuDCPni3AqHGjYww6wxpJFEEP8F7ibCxcqCzxviVAav6U//krlWx+zUHqm77pm8g587M/+7PvfP9nf/Zn+exnP/sVf2ddV9Z1/bLvSzwiSQpJBikVUu6klEkisVj7aVUb9DEY2hl0BEPEYEAWI6kgmsAyWPJFa0YXJvhdb7GwjbiZFBPFUty6ca5FE5YSDA9UhxoWLxKBH7EoJgk+1rCUMPUHbIwOGEMHJn6DiCYsE/e7IcWgQkqCBUiZiB/XvOE1/hRFRVEZqBiWBSsarwCmClYUiiHZz49pLODq+5k0gWSwAB/LmHq0ZiaoCirC6IKaoBogoAMdfg4RZcjAUHYGzYymxlDx1y0xmqCbv6wnUqvIKKShHp1JIuWKSEZEsObgrq07CAK5KJIUS3Eesi8smgxV9XOrSu+d1hrt1mn7oO2D3tXfNyvIQEQYNlDx95MCaRHymikjU7WQqpJTZr1W1ofC8pBZHirLQ2V9vLBcrqz1imRAE3vqdBUGCVkNq4aK0qzTRmNrO6M19u3GdvNsam+d3BKLZdanhbJk0k3Ii1D2RB1CVQnAEZKAiOLrQAK1AHGJqJ8AB4vF2Bcb8OsjsZQWEkZGcOAyfGmdsXaKZVqYNygeKaAgftsnS+RkZIx0LHieHyUEE89q/Y4tKFuATroDqbmgjwCKGd3Pe17j5c/ohBQ/Dotj9vOjEMt64RJL/BJ5g58LB5cJHEaOPJTYb0UCcOaWY+H1T4wg8XgaR3xXsCPzSjhUTyCcEFh8L20hSUUoLFTkLoMxlMq8bhZ7TQDYCHAkPsePXCm+hsncQ+7yo3tI1+P8GcbEEY08vHGL18Yu+/E7Ew5T3CvGEt/Px6p9gtRXpyf4NQepZVn4Pb/n9/BjP/Zj/PE//scBF0L82I/9GN/7vd/7P/tuwEqSThEhlxtZjZQHKTeyDST5Ld21s7UbbTS6bqQqpAGasj/MIyGjkkZB1G8kkUIyBy4zv/hqzYHEEp3mIJWHgwZx01kCCqPhi/RmMBR6UCviAJvEA5QMyACzucg2Rt8Z2jE6w4SOsNRMyqBiSFVkMdLFyFmQ4jGnSELN38tU0T4coKXRpdHSxqgezY26ktaCXhS9DCiCXBSyIll99dC4gQMk/YHJpKFkW0i2IKOCFXQkBoluQld/jWF+3ArSe4C10nWgqrTRGKp0hdYTfRTac6ZtmXZL9C2jmshpdcogJQcoEYrIEYWNXbGhjN7oDLJUuCxYKdQL6AJajN4U8sDSzhidMQb7ttNa47a98Hzb2NvOtg3PUEuiqCGJIDYGVhS5CCknFgqyCOWasQEpFR5fXXh8deHV6wuvvvmBV9/8ilef+YA3jx/wpj5wSxc2bkhNSHlBLjc++XnFbsqeOzfd2NrGy9Nb2razPd+4Pe+0fbBvg7pmLq8q62Vh0cLyQWIlc62Zh5aoS+JCJksiCwgjlstY7g0YKShhHDQwCiOWbgNyZAl+fxYrFBE6Od5pHEGcE0Y13j0DM4jr8ZwOSuQOCwQITMLIF7dJNqWDchr4DbgjtGM5s1j2JTISLKMSEXks0RPMJjxMoLrE36+x7IkZVS5kKiurA7slqhDnoQco5qDn/Dl0etG4MUgUVi48ovEe+W7Rn9Cjd9CiAUFrwE7FVyBw2HOAERaESpVHMiuZJYDVeBWA51TbjWGdIVucH+h0UgD5JPs8I1IPJ2RFKL7uxGkkAIug6P2bevcdh5/GTmPjiY+48ZaNJxp7ABoRrMzjGVQWjBJ7Nqm+uVcTuH757WtC933hC1/gT//pP83v/b2/l9/3+34fP/RDP8TT0xN/9s/+2f/Jd5LjZXEXzz/Pf4pTb8oYGrWpQRIhJ8E0YyZO0ZkH+v6FBDhFJmXgVNWM9DzCJnh9iw9N2MQhkoENg+xvbCkeGhVS8mjUDFzYMrO5AYMjnlTTIz7RqFdpMiQrUhQpxsiCZItL75GyqWePw9TrGjoYNvyGNnMwSwObmUaKLC+4shk9Yl4vMxXQhFAiazTEFqciNIMm6AnThFlCW/I621DoBsOwbthQNK6DqtKbRlYD2hMMg+cMW4GXAZshZqScSUElpuSxqInTqIIxdkXVvL6XQAWqDsYyUIkY0TxTGcNrjq0HSN2aU8J7p7dBH/5e4jcOQzXOf5yVJKSayJKpWpGUyDVhcV3Xx4X1VaU+VtaHynpduKwXLuXKKo9xZhO77NzyoNRBuiRYQRdjFGXkQU+DLg6N3QZD/dzpSFiHNLy0V4ZTkF6rS+TIziU5jeZhWtxDCCZpsjd+ueVcyGfMPaNvuYt6LYCrUFAkFo8UdY+AHkuesccyOn/TIzOb62EsxePIezQgb2Y7DnT9WPrmI33Q4LF/72rCNGBhvsf9CjFhV47vJ4Fy1E6C6JIUgB7ZhDkL0SNvctj0atMLSo6zsFDJ5Lvnxu/BCXWe1zTaUXny56gwnymJ8zgwUhCnk0Bdozbn0Fkwp9xl4HQNKFFoxiLLmtnQvI5x/c3pHTHPZmeQd+x33C0gR7Z2n2vtbDQ2NnvhJs9HzXLeCR4q293vntdiApTZ/NyvrgPqawJSf/JP/kl+7ud+jr/xN/4GP/MzP8Pv/t2/mx/90R/9MjHFr7RN8fwEiHkDO4CcIgRisW/aaX3QRiNLwnLxmpEKZn7y/IXXOi35gm9yfAo2jpsPiQvOwI5HIR5+OB+WpP6atEsiHq9zaWBAMkXUuRET5XzUo2iZfAG2NLPFwShKSkJPhRwFXIafhaGDHtlCG/511xbJUQrqTx1Ek2GzIhyLsSqYGjrABqBCSifPfWZSBbRAT9ASNjJjS6gq2gUdnuVY80V27J3eR3zt9Tgd4ovvAHmqyGbIiyK7gFkIOzI5++ILTtdhw2t88X6tKzkpORtVO2nVY9FUNYaNOL/QWqePxna70Udn2/cDuNQs2CpziiyA2QQsQaqZnApL
Msqq6PD7I6fM5bpyebVwebWwvl65PF64ro9cy2uu6VXkCoVG4yV3ah3INcFVsIuhq9I3ZVQ/rp6m3GHWLM2ToCFUTSzmryqZkjO1CKm48IY0S/J+x6lkX7ridj0j7ZPUksisTjGAxOJNLJ8exA2RO7gocU8Js4D+bmWIYxn2RdVrSPMJCHKLSTHqQT7OxfPcDtCxRBEjmTl9Hr8blc+7wJGQIMzjzcf3ctR+5BALnBmZMjCxIy8bOEC9MLihPOMCAQMWVjLFwwGzY42Yv9ND0NFoGMKs3lnQfU6PjVhLRmRPlcwlRCQXJp1nGEMc9gYNz1lTfJpFOcOh1qncCVFQpABKEot8Z5415jsfZ3CGCPcE4BY034u85YW33HiLxudlCpVJZ54gBV8S+kiKNfXrGKQAvvd7v/d/gd57dxNxSkDthTaeeNl+kW2/8bLdGEEnedwRZVgVhsIYQqoZkUzKhVwyOWdyCkbdgwcw8QXdzhqPyhn7ndEHzBhPrYMpYrH4mgs1LOmZ3ZrHTQaRbzvYOYfcEGnkNEg2MDPS4vWPdBGkglSvhQ1T6CNEHl4DEhJ00K70vXvBvTX2p522N/abg5RI4lI9i6KBpBQ1OU/c1ZT2MtAd2svw6MeEdanknKi1ku1CspWkq4OUVpIqaUDpK4yENq8p2RjYPrAO2kCbMYbXqyz2u0bBLdVKZaXS2HNzsDO/nrsOxnDVoY6Gasd0MLqiBmMYpSzUYtS6I1JovZF2r6OJRW1NEtu+00fndnthaKdZw9IgZWORFEGOU4xzYU1WKCxoUiRV8tLR5FldWoICXBeW60q+VOrrleXhypofucgrLryJuyVRuVFlo+TO+qqyfqqyfEvhkhfSGyhvlL6ttKeF/nagN0PfGkvNPFwWvunbX/HqUxdef+tC+XSiflOCT8N4MFg9M56wA744e4SUIUd1RkbQQE7KVDgouRLL1QljkWtZOiiimbvMZwBgY9Ctc5N2ZDYpyKf7CtLMtBIEoTXBImNUXDYhkRcccfjxp4rDITLzo1m8T3EUdlCIOaL8SYid1S+PzAQCPuQOqGYW6urBgddQNzobXjHLQdc1dgbVF2zxLNEhqfFCo7Gzx3ccOBswyAxq5FFy5GVGYSVbZZUH/5oFCzqwhWqwB+kIG8IOjMj87oBB/PrnCDkSg8ysJRaIe2KejRG1vvsa36DHq7Hx4mpc+5hdvCbl75RZuLByZeHKhUdWLgHe9bijmOdbzqD+V9q+IdR9v9RmEauo7Qy70doze9to/RaqOmFqts1SZE1EzcYJgJRKUEg5iswyn7/IqOILuRdLdOxAsogVo1ivNhBzyk81mMP5cIhTRZFBIwlX0KWIuXIH85gti0fNiCvHUgGpghSvjzjVZdgwr6GJYqbHMzj6oO+Dtg16G+yb0tug7U5hpIQDRDdkiL84M0lTGM9K343taVKYrs5KtSBpRVhJtiJWccFJJllxoNJCGoYMRYZBB+uRWQVI6fCXkBwMpCApQU4kc1GGkJ2S6xtjePbUWnP6sm2odlQHOlyNqDozY6GNQRneilDUJdxd+qHUa73Re6P1xrDuQUJRJEHOEovfKb0VFcRiyZPq/14TuRhWIV88g6lLJS+FtBbypZCXQkkLRS5Rot8odJJVkrhSMV8y5SFTXmeWUWBRZF0ZW6G+yuiD16y4wpIz10vh1adWHj9cWD/I5DeJ/Eqwq6ErkH2xOqjOGRZJ1ExkRrEasbMvTcl8YcuR3dzDzxQQIxO0ZgXrnoiD3ZQuvqDPrKzB8eeM3ls8CHMBzQGVFsuov7/Esnnuy5QkzCcwcS5259/9+N6VU0+S8Y5x4UsXS1+u53c9pzJ6ZIcuXe+xZGscxxS0n/q8ETWoxqDR2els1mniWUfDhfp6rA5TbDDhaqVIDeguAbFeZ/MMamewY+yAq0PvWwrmtTjPnOeSCT1y36noO1sxHJIng2NRTetsfgS2s4lD7iYvdHZGaD8nDbywsoair7CQrbi46RDUfOnrV96+oUEKdozErp9w61/k7e3n2Pad297J+eJFfrn6jW8Z1UzXzDB/BFOqlLpSlkKpiZwdQ1LwiKaAzlpGAFTumO0BWolZclYT1Iwx+vl7w0HRLLlCL810N24Km7dFR6VjskFpiDVKaIgsAl/JkCqxg6HBUsP2EeAy4olK9N0Yfbha7dborbM/NXrzTEoESjZ6jU6mMjmsjIXYYXRl/0TZN+XpY2VSNeXDhXRZyek1RVZSqpFJeYyb1SgKtV+gZ1fp7QnpHb2NyPCgNVfYYUZOkGthzavX6kqoAlXZ95297/Sngd52tr7zfHumt0bfN8ZojN6jnui07bqsLIuyXm+knGh981peAJWIZ4V73+m9sbWXQwBTskv+y8WrI6ZgPf7UfMjbMwlJSr4o6RHkUciv/Brlkim5UEolv17IDytLurLIIyuvIzY1skT+UCr1daVK5dIr8qGx7hVtF6dId0U+BrkZ5YtQc+JaEx9868L1deH1b6jYG7APjO0yGNnYRA/KTfHgJcsUNsw/YSq4EnCFoHqd9loiw5oSiClZmLKCwT2pM2kx40U8yt9okdkY+Sjb5yC10lHJKAFKhIBBQk/YcQKsx2I24/2pmcvxpzJJxxxgVAJicwjjS1BxLajPxKAHCTfh+UgOQ2zilSI/NmULGNoiM7oxeMuIfVh4pLEGWAE0Ohs7N7ajw2yTjU6L925kOivGCBD1jMm/WuxClsLKdVbLeIks5sbbkIBvDF7i6uzHFZodV4qFCH2qL/3spyN/nHnpvJYjMkKH4MGGi83fsrOxy40bz/HvyszPa4hAHnnFIx9w4RUXXvu9LVfO3Dwy+YPm+/UAUtYwEkM3+tjoY6frztCByALJjhvOI/VEThmV7BRfyZRSKKUctY4p3yZ6mzCNbCgyJY0ahylTT2EzzTbxhl71DGdG9IcQ40hz7ZCicxS2x/HguOQ56ljJ+6EkqBvM60Ohr5hMJPkInYyxG6PB2IyxmWctDbRP8UPEr+rSchuewJngPVEdRhfGNmXgsailDK1CWUh6IaWFbBWRwuzJOtSNKk7t7cbYlLYPtpfumUvr9NY86xTBciJJ1MRm9iJBQWYh6RnpjZkRDc+oeu9HYGBRMHJZenYRRB2M0dGUGTLjVL9wre203tj3zelY08jeBEuFszckLuHRSsDRFjALn5InFSveV5UTUsRfye+/+dnzUg2MEbStZqfo0muh1IQ0vMY3QJqRLpBuUC5QU2ItictnMsurRHmT0AdjLEBOkKKLRs7o2O+z+9rAXJpPIYLTfXJE93PJU3CFqaQAq1NYE+XbA7Bc7hBKNos2DzSAiQOgHDx9sdLIrNQEk0k+pvh5p+F4J7eykCj4hRHR456+rzh59lAP0Jt5hcObxueeQnXPEomz5NTfPA/zmFpkUI1TBD+X9Bb5DRjNOrv4z84sbJ4fQe7Ol77zmnmghphlNsR7dtbv8rKdzhaZVJx1Gx5szSsU4ggHZSNR6LbTxBWTmcbMhvuRI565n3fCNXY2Nm7sdmOX7dinCZ45tJs1GooXrmQupKD
55ACmGZzz64nu27xBVje6bjTd6NoZOkgSrXRyFutSzmT1LKqUQqn+yqWQco6b/R6kLL5UByDxmsdIEa3Iyf+qmdfYIwPQoUd2QbDB4NFscGenQIERNZcWGVV3CXhIhBFO1aIZY8Awc0VcaERMD8Rk7DC6oRuMG5HNCIyEaGicpCAjIUNgnOJdmwDVBL0ldEvY5qmcpQJ9QcbqICVORUioxVzxJwdAWYexKfuL18Ze3m700WhtY4wdMyXnBDWRRbHswgQPFuIUyQQK83Osrs4bvdNaP4DqHqRmSbz1Tg0Q6zm7KCVOpA3YotF7228hxVTSKGDZSzYRaZqlyKQ0FIueWSOT9xfIcoASJUCrxPfzWbgmFsZZ9u4RmmgxWEE+MPJDInUH56RC6kJ5FNIm1NdQRVhy4vopoT4I5QMYVdElQEFmzhF9YZzyg7OZ0o7YaSr8CtCYwusJSP4+XRwseiyws7ozF/pTwaaHcHvcqQZn184Uw0+xO/O+I2HiZNQEm6nvmyBlAWspPj9jXyZSmnmDHcBU4idP5d1JI546vvP8+Jbu/l0hgGn6Q0yQ8sCmybiDDSc0WzTebkygutfKndpF/9NhO92FBjnOrOfcvn70qEP18P3obBAgBc2FUPfXXZQZkg6ccdllJduGyYawQQQDzfZQkvqRtAApz4hvbHZjE/ca8YAhxztLQNQSAPXAwgOZC3JUOWcGNddiOMnIX3n7hgYpT7hXlB0TpZQadadMjiZPVEkCtWSu15W6CkMy60Pl8rhQHxZKzeQcjaimMKKz/U4wMdRvqGaTqR3REOdS9FkDGzO9GcYIcDtSXItFOHjyyf727sq7fQ+QYpCK2544KxM1Jzyr0xFANcBGOusksTjTFRmJ1CA3Q7ogXDxbkJWcEjlnlnIlSXW3CwueP2pF1qN1MxUe1oKkQk6Vy3JhLavXWHIh50JOGTOji8LmDhv73nh5ufH24yc+/vgTnp9vfPzRx4zu2ZTL92FdKsvauV49i6u1Rc+p91K1vtOHiz9QpUimSMFEaVH/Yiij2ZHtpCCJ2t7pS6j1xmBIcr1//NzedvbtxtPzE5oGFGVPhaqZVqv3yGnGdpd89+Zy9D485rSuyFtvUi0jU0cmL4l6ycgKaRX66LSxc+OFF57JFD7mY97yMV/kI77Ix7zlE57kia3e6Gmf6xRpFLIKuSXy4mCV34hn1UkYr0AWaBewnOJ+5KjsOFz7Yngu8bNQno7m2RHswRYL/Ue4qGGFgFAJcPIzO5fFCUFEsf7dTzkbVj2fSe6KYQUVoViiyVkTmbndmalpEE/KLaCkwB2EeVY8m2+9fuZgNImoE3zmwj04rXv8JE/bIOOUlySmPnC2zo7o1vICg4NXeGiI08Ibylt2Es+eCQU12AMeJvieIompoOvs3HCybWe6PbTwbGhxhInEjU8CMp7p3Bh3mdSsSXnGddbS/NONG0ahUTBuUinxzvP8uyDM0KPm1iKr8rOVpLBEyzNMubnD05VHrjyy8kjhgcQDsMa6dw9Q8C44zTDll9++wUHq5hcksibPlECzMouQqEdbOSdvhrWCZljWSl0Lubo3HtOdQj168gxqZlJnpjTU6BZxqkQfSJJDjGEqYSfEQQF67WlemKmyOVPymXlp1+MBMnWajxFRosT7aWRz3WXb1uWI5o8CpXqGlLqSh0ak7CGziLqSMSdyqiRxy5MJyqoBUiGUyClBSaRUyaV6nSWH44NkckqknJxaiHNtmFNyrbNtO8/PN56envnk47fhQddJQWHqcBVkToVaqp9zcal4V1/gvem2gxpnq6dTDVioEoNm1WFoNjTPnqKwPjIXlmiIS9TUs7He2fbNFZhjwF7QnEm7d+4kzaFOFPo4Zf3NGqaKbH6OVDJSCtZcgJNSQos6ONqgW6OJVyluAVkvPPuf8hL9J41RBimUlNHh4H1hKcQti6cOlkCv3qA8Srg1BJ2ZzPy6EnIJcxeQ+5zDz2QoOf3Op8eSd4t/nbLrKaUeuPjIpeenCY7dRcb3GZs3kM48JpNN/L4h9sc8e7oXRYxjqXWKbQ8aDXP6MpvL3JfIdIx7f4OMTorWJt05rZRmVnjSYUw60ox0r4g7+nhm1nVSj/N8nOA6vSl8X1/YERNMxrHE6/HO89lPTJ2jh7vtuAITwqCRLYO0Qzix240m+0HNjcjDfK/6cf6VkxnRu6vXgUJGeaaQ2Lm4QEmm9GTyOuOAqZmFC4ls1QMEmceQo+o3HRArbrE01XwzQP9yitlm7eKr2L6hQerZPiFZo9mOilHq6h5gYu6CoJmhRk7u1iDlwpCC1k69FOo1s1wzKYsvcAjdA/OwA4o6RDQCD1X2AW04rUcKcEznybZwp4DEMI4MS817B0p2GyZ3hjC6qosc9sH+cpICaVJeCcSCn9bhCrbuZrQ2QLuLSWsqkL0+5MmXu2IkLR77ZXPfv5zIxRtjy1JIOSKpHlladyqM4eaqIpmaF2pZKXnh4XKl1krNlVwc5Evxxl1DkeyPR+uN27bxydu3/OIvfsTHH33Cz//sL0QPklJKIpfE9Xrheu3uL4hQa/FoThtt7PSgBadSMpPc8DXBTicb9AHWmjcJd5f/i/l5HSNeOkg2m41dGHLbN15uNz55euu0Su7sqVJ7ppfwG7OC9AKaGN2dS/po7Lo7ndKFfMuU54zeFsqlwFg9E5LE3jvb2HmxFxb7BBPlI/sFPpaP+B/8Al/kizzxlmeeY3lorvaTREp+LUsFLkSE7psAPQXtJvN7QXbJbBWNfECm4uxUz7n4eRoIcbADLXKMF/w9JoHUmdzgaX6aAxRMTpDxO98XpRKSCJcxZIpINK/6PdckYXfL9sxIYEoB9tCwdZAo/8tpgOo1I++wyvH7SCzJYgERU7ohKC3yoI7Y8LUCI4VS9zgK8QVWSaiNoPMSLYB7NjXXOyHAbsqQnc6T11k5QfE8Sl/Gvb/Lc02XdYfClSlNUfeukMzGhRzCjyY7g8HGjc4exOPGBKmjLE26q6c6UdnZEdvIslFQEm9J7GTxqzT9QlIA7qy7Oe/jFacq0w/Qz30yb2hfeMXCAzVqUbDiZrz3d8V99jRtugZfzfYNDVJbe6Y2o4+BmbopaMaj0FQxdbFEsuTRKZ0hMAqU4matEP01ql6MHyNqHIZ4IQLMAWoMDTPZkDqLIuGJd9SZ8OInWADUlKLHwyLZff2me4GnOVGDl7BUukuODQ+bp39gKAf9FZE2nkVlK2Sp3ngpBlSsBJBW3EC2CKmkaJeRg9PX+LBk83YSci4OemmlFn8ttVJqCfeHiI/i/vNal9dgSsmUo/8sRXe7H68fqh0vgsp0hwxBzYGgj53e98h+QhF0NFZPMUy0EUgIh9UORsHifB0WUcN9EB3MhisH9519c2d6y520mMf9qzfmZlHvwzbxvjcN/0edUaa49H+oF6nV6A+FfFGniM3pohsbCzcM4VleeOEWrtxb8P/DawpmUQ+bNwBHgW4q6O7Bx+PUMMB9Byrk+PXz986mzMMh/FCuzqI+0R5KAFb8js0a3LsVnpO6Omk7B0NFg9Q+tX
87hmMopOoGiirZ58idQ10nKkbEod2rIeorFcq6JbQMpEWBN4iDlQciBsir9Veva0a6FnpWfQutB7orZE3xxtVV4+dCtSz2UUkc6cgCaWfFwbpTZKtqbAhUaMjsNRePdF5N27wONjYEq7FVgbKJ+YdqkLNQNBzZrLbfTe2fKF8+UTHz994Edf/RZPL8988+kbPl2eedmurJJpvtCOFbc0XOpsesZRebopQUDzxtuH9/TeOUwPxDBT6v9FKOj/Kx//+Yf/L5aXxHq9UrbM5dOL0VivL1y3r8nlzDl/Q/WdFqEuAfUejQdzEnCe49uFNMF2g7xV1lvBPV3NLfuW0ZdiTCnZcCkSDgvTDDEKUxSCC2Z7IwnvDGf1YvTOvX3WXkd73Wht7Lbyq3bEdhxqsejq7Qlot0liXTNbzqzbzYrK7cZ6W8mbGedaCNvQ9jhH8GFEeshdK9Npw029U5rF3l9XM9rdtkLZMq1WylZssds6eS1457jGbezfBkilduPnzUx6r5eNkhvrrRobr3Vqa/DZxCTOwgL3vZ1zDh8cj2+vPL65UYsgLnHIjbnNd2eWvG3kvNGqPbVvNgGFwBQfieE9Kbxnit8huAMxvjVhbZh5nB+YYuK4LBxOE8sSOb2dCcmRpnAvorU08qrcnhsvqXObOi5fSJuD9UwrD2RWmn+yZTmN1jOtbzRuFjnhNtRnuhhtPme43SG1/ZgfRXowLmmv+6HrC6wXuH6yX/OKRUB0i8kILtl+MtiOMsY4Yioc6qxI1QbbhjFUw+tafO1Gxd7qKJzViui+Mup97KXEiGrOQYyeMA1Pv99GxrAixbDuEek4bwVVNRppRwCGTRaZuwJe7Xova+f6kjk/bTx9vPH8aeP5U+b8ZFM+YVw/HUIUWnK07nDVKN8q7f5aNtQGoqRWvBJo9HTn6YtDksMdHJJmXEi4QzCDWm9MWjByhuKhT8j2HtdPRPkuoRzwZSack4VlroE2XoomnZaF24tw+xC5vThu50rOnVwqpVuaLdg1EJwn+YiIkEkUnajd7LB8D8iW0aK0omxroFehrWVM1nB5ulFzYbtlvGv40JliRw9mm1ZrHb6bhuCk0DgeE+/fB37v70l88S7ynVNkDuClARtmpyT3BiZvkFKjsxLDM60JL+ev+eqbr/j+b/2Q/89/+s88Xc58fX7ipd649JV6ynBshO915LHhDpXtw0fqxVG++ob18okn94g04e3jV1yfV1Kcud3yz3TO/1wXqefzD8g1cjvfyNvG+eOZMqisW3mithu5bTQ/KKgKGhkYt4wQQ3NESIsl0IZoTDS0Gm17U9pNaWnEyg8bIIfHVzUqeBN8Mz2Q99GsN8VZgZJuAsOdSjX2UF2dNdVqBap3Y/3oWMbuZIFS+h3y2p+lFHK2KWpbN1uAI/dC4n29a5nEjQTZXk1L0vuAzxrrmi37JVuScW8GF/Te0WbUb+cEX4ZL9p6mq7bn2LZC3govz1akbtdKq/0eD2A4jBpBRGRoq4zR6EfCsQ+REBK368p6y/gQcVMYeVlCq41em7GfBqlEBrwUY2SOM1M6MMcHgj8yhfdM8cgUDzweHphT4mGZOTxE5kPg9CaNXDC52y6VrbCFjuuVfu1IbVznQCOwdc9NnO1dPDivpn9zBSQjI/7AzNzMxcBYaxawGIRXUs149r1IyWuRKtkmqbpB2wwO6w3Q3dnDDxPZnbDj7wuknTxRB4vNl9ERj9VU7ULTQTlv3IvzbizR9wGvGxRtjYQz5umPF6g7MaOPZOb9n7Fw36dNrDESbaOg2C5L1QgTtULJnZwb29bu4ttWlNZ3so8x4PZnaw5GAoGKwNBEdXXGpekBwwUjuotJvR+5Xx4fEy5E2y17uTuPCAZTmiZScOWI8IDz7wnliM8LcgnWWF7dXVAsUmyvdoF8c/cJuG6QNyFXoVmWjLFbxfLAnAhV7PrqeNuFqdjEVG0dkW9q4ZvXajDtBrdLHU45jeAbsXdqMYiuVmdIS3s1pfVeSZOwHBynQ+C4BJL3Js4Wg/vozu6BquQCOStIxYUNkSu1KbfticvlI08vH/j0/IHn65WX7cKNwiaFnjpu7vgHRd503FHpa0GLclNwWei98uHTV7QqBDkR42IN9s/w+LkuUj/40X9kmjy380ZeK7enm0VN5IJqxixUPM0PTL47S/zsFuXs5kg6zPjZcTgr15fM7WUjxUgvlbYp2/OGy5VYPTpFUoGpClSQeES67SO0gQbBJWM3ueAHDK+jSO02P6anaKMd6+jYtxi010eERK9j6qmVLWezAcp5wHMb1+vKdtu4nG/D/kfuxcBiF0zQuh8htRlTbreK6a2zbXagttLvxcmo1zq0RXl8Pc/uGA7jZ1JlXQvbWnh+2shb53YZmpWdITce9wPaSHd4b5NoiOZIIBKY5yPHhwvmPGlwjJvEtFKl0EqmtUxvGyIR75RlChyXhePywHF6T/SPLOG7zOmBKR15e3pgngKPx8DhQZgPwuGdM9r+tFtANW5rZ7s2oq+40omuUa+VsFY0Ncsj66Yhm5bOtDRCcoTU8PEFHyacn1E503Viyw2HNzQlD6mB38kIsLsSgU1MJcN2tmc+w7ZCLjZxdBEIQ4QcBB+8TcjeYFjVTtGMrxM+d9bN7JOGbAdkzDv6SqzYPfoYsKQMYlfzZpEH9h6ZBmd/Fz9jDNIG9JrNXqh3s0YazLbWhapDhiAVR0HGTkq7kUO2m3K7Fm6XzO1S2G7d9jnV5AXORegJHc/ePV69wZ/72DdK4Z0lyYL5I86gg/QQ47C2cszxwOQTIVk8ibghntaAqxPSHK4FpP8SLn7B5P4fhO2EWxe2D412VdqzhTVCRtON2qw5uz7B7Qr57Mmr/XzrDVpRc4iQCXUHxB2wAe5K5UYB1t6ovXLdKqV2tqLcLo1ahPXaDKItStkyvRe0V1qwe3hdK+KFtDlrRLUhVHyAaYbHR8e7d54vHiMPSyT53RzYdoVNodTIbVW2VdnWRmmrzcntRqmVD5/+K199+AE//Or7fP+r3+K8rry4RpmgzII/dsJbhV9S3HvFHTutrKhWciyUy8rl+ozWiQ/piY9f3fBuZl1/NofZn+siVUohBMOqvYsW7e7VIrRldIGiFKls0rhKoTpQn5CQcDHiZ49UR1yCRbIfZqZlpopC3yglQ28EMYeK6DzRiVE444LLiisenxQXKm0WQgjEmO7Th4xU07u79qBR6ZD3t2oTTilWmHZYrvfXKWrf9bTWDHseC9JtLbRdhDOgGbdDNG50ryi1jkmpNloZe6PS6EMsW6vBge0zyrf7vOjt58J4KGrhgpslHpvgVl+X8Z897robe6teD2kZFNhugYNlq+RUiFuxPCURWi2WFtwKvWW6ZmBDJBFDYZ4Kh6VxnJXk4Zg88+SZkufhKMxJOBxhOUBalDR1fOy4VNB2gbYi4RMSM27a8IdOao3l7Y2+fU2ZfkhePxH7C92vTIuyHOX/T96/+8i2ZXn96GeMOedaKyIfe+/zqBe/Ely1DS4eICGQAGEhHBwQEiDh0Q5CAgMcMHGQcFA7DSbiD2g
HXRMH9cVBt5urbuju4tQ5Z++dmRFrrfka1xhzRe7qF/W7F8SvfhWlqMyTOzMjMmLNOeb4ju+Dx8fGPBfm+QMxuvOJ2ef0PlHyhjT1OUOQm3BXwmuR8tdwdE/ZyBehjHlUywMCvr1o3EgZqENbnU7txaHeQ/CaTszZyRRpYsgVRnEEROyVWj4IHPQxuhr06+P9845+lKZbN+WVzVlx5ZbCWouf/m34+dXmUePFKkhBrBDMxUxt879xX428j1DC7jq3GCbm6d59uFNimWaWaWJOCzFFNwFOEY0BiY6AyOFer4ouJ5g8WBFmjzQhuvOJCA8kFgIz881R8pYXlWdiSVidIL9Dw+fE/Uv6daFdE/nHjbY22sVF1I1KSyPWZu/kS6CuASkBrRBbZ+6dZiBEgiQmXVyPqJ2ih+rKCAPhkE/Tops5+WZ0VrUY5sMqRMqYgTZad+3XtgsqDr2myTidhce3gYc3gfsHz7MLejA1OzZ0YbkqW64+H9w725bJvbL3DU2NUjLfPn3Fx5dvebk+seUreyuUBGVW2lmQR7BHkAdBT0pYFH2M3rl/1rDWKVb4sH7g5dL4+GOHtPbtYBf+4bef6SLlRpFuEqpBSZPHSYpF4mA/qQqbZdR2Sn3xNzaIY9MpEqaABiGdItNpYj5NzKcZseZJsAVa7gRrWO2ckpInJ0gQr0g0LAc0CSF2rAamaaLPbswa1DOlRIcbNa+wmZlDgb0LrUEpB43ek3jtVqS8jW/DPLWOYpOLz5Na9cJ21IZbMdGDmm+UOjqp5kWlH0VqpNDW2of1kAsa7ROYTsNQiByFahSZkhsle9xJ/+R6+3RPG4gfjI8CN5/Z22Zt3oHVT6BHE8VUHeqrzm60XqBnICGSibEwTZXTUjkvHnd9NyunObBMgfszTJNxOsG8dNIMaTIkVjTtNPkI8oKFH0HakelKWDqxd+bHjbZ/oEw/Zl8/ktoLFnfmxTjdwf1DZUqFOZ5crEwEnun97AebMtGZaCqEQfP+iSI1/va6G7WId1QHs++A5cQGyeS1QDEOXn0UC1rHsnqEQi7kMntych3vV/ikkzW5Pe6nlHjGr7XxZh3F63bYeOV74OBiHSbExeGn0qgVuBWpTmidQkWsIFIQ69A8fLBs5jHk2QucM0BHkZrPdDPCFFnmxJwSc1p8LcU04LpISLMLtzWhYUJCJMxnNE2IelikWBjpwS7KPYswiTtHhgFFSlekeSeleSLmE2JvEHmLxne050S5BMq3lbZX6nWjyE6VnRK8Ey+lk7dIzwGtM6Gpx1F0iLci5c4wU1I0Vrq4q0k4HGpav+m/eh9put1TotuA3rGMzwALoh6j0w5STIYYHeJbJmU5KQ+PgbuHwPleifET4pL5TLH2SqmBLTf23Nn3xrpnpHW0Niys5LLz4fkbni4fuKzP7GUn90oN6szJc8AegAeBO0UWJUwKd4FeDN4G2gp1bzx9eMGuBfuYsRrJPw+dFEN7MU1nmBb09Jaoi88m0pmg7iN2bReeykfa9Su0r6xhRecFWWbk7Nk+00NkeTtzXjvnNxuiUPML26a03TwaIgtzbCRt0DKyKxZWatyRuKFh4e6+MS8L53NnWU6kCU6Ls/9UPSqBmxCwDb0IlGps+6uYt3e3S3GRbHVqd+03yC7vlX2vbNfsM6babh0Q8HryHkWqtnr72VK9SJXab3547urgd6dZD4jqmKncxMpjn1Sc3Vc9zZZj+D70ebdz94CUbh3W+H0xKikd3oOKdfO/c8+U3SM+TNUd4Gsep0hXOgYNpBCYp2fuTi883l94e7eyTCceT4XT7NEL51MjRmGaIJwaYWpoKkgsEC70/iNaf0+T36TrFdILeudO2Ml2TnmF/YlQ31NtQ/SFtDSWU+d8h8etIy6KrkbvX1FqxNoLpU5IO7keSoZd0+hqYnid8rQi9ALbi3cYZXflQzWja3MBtoyPwwPS1CHc0iq9+gbQZQK947SdCKK0k3phZBRI8fnYIVhtgzCBy22c4FIHDb0xzH1HdzS8AZFGp1FsJ/eVvVzYs+c2tdzHehRyMQjGaoUqmWQrcS9IrtTnxPoC29XZoL15nM2UJs6nOwjv6LIQ0860BKY5cF5mQoroMqPzhMaJON+jYSalO2Lyz6d4RwhuGhx0GvlgI+3WOqGtBApzvxDIiDSkKpIjsi7I5QTXe0L/Eu3fQeoXcInYGqgfMmXPbJuyc6UI7FpcGN8NK7PbmZU7tCcSia4umw0herjiIsRThbi7iTFg1pHdqZ3aBkSs4mGivd/c3FtrqLrzicbiDjaxU0e2iq1+GAtBuHuYeftZ4Hs/SHz53cS7z0d+mOqY4XVab3y8FtZVuVwqz8+FfRcuW6Wx0uTC3r9lyytfff2Bb95f+fhyYS0ruxplTvQHwT5T5AuFdwJvcBlZEPStoTGgRdha8/Ts//aB9T08/ZcfkS9+WPlpbj/TRSpqJMboEdhyJoZHot4xxXvmdE+QhGjEykzeIbYXQjNUiofkRCCYa0MmiLOQFo/qqNk95TpCaaB0YoBcdJwgBbONrkYNAWJAgxHC4nRqnQghOQw0+wlTRp7PTTGJ4TY/vlG0WxczuinzzqcN6O+YBR2UYesujH3V7hxF6rVQHbEGh6O4n8rG7x9FyWHEYdzacLhhFBURZ9nJ+FxHoTlYYf44DH3N63tzdEnH93QckTzmM57npCOYbwzrj2O9m8e5awcNlUoIjZQ682xEa8ypcVoyy7KyzBfm+YVlSpxOLyyzMKVOmpzoQOiYDAmnuQsJdqHbj+m8x/gKkyuEFw/DmzphKcSwk/TKXFdC30ELaarEyTF/1Yb0FWOj9w3ajvTd6XPFHQV0vG4hjqJ/mIMfJIph8Ooi3AHxfSKodvud4dhwtDwcB4DjvfcO++iKj9/16Xso8iogPnwY7dP320CqIcG76lobWjuty4CJdMx4fRPrttJtw2zH7FVUfxgT+/P1vstw4pB1J3gcf1OInZg60+RB9BYSFk8YQkjKNLuJ8pQG6hFH1xQ9EiOEEynekdIDISzM8cH3Al1Qkoci3myMqpOLbEPS5q/hmCXTBWocURcJ7QvaFyQnZIuwKbY3elZqlsFZNLJ60TYTpHkIo9roqnWCmDyuIvo4IM2Gzh2LfmCoZq6FLJWuFRsQoLuDjDXb25j7NUxGAOLI+hI99ghzqNAcsZkX5XynPLyJnO8Cy/Ip8cmvo9aNPRvb3lm3xrZX9l1Y9061lWIvrPWJLa+8XF9Y94297JReaQo9RGdTnsAWgRnkxhIyP4nNIPcB7jv2DCVWdmusW3O4d/05KFLn08z5dGZOb4jhgWX5PjHck8Ib5nDvOUoEZH9PWZXZnii1o7Yik3gvHqq7F8xGulNObyKntxOdwstTor8E9qo+ALXOusFpMUJoWAk0KRTpoB0JBeuJkg16QiRhPXBaBIsBPRJK5ZV80HujNhlssAH/3TQ4/SdgPrNPNB+jrVFRN3NVvblFAAMi6reOxj0i3BbmBsEZ4zkcm+LxNf/Y+xi+j01N5BWy85mS33+CFHArOH5r3dDifJFmvEZ2p0
hKkZgCMQ7MPBxu8m73pFSiFix6V9TvO1JgCsacKp99fuXN4wfuH5XzkjhNz9w9CPN0z5TOqDpFurPTuguEeq5IrIhslP4VzT7S5L9iukK8EhbDgjGZIaViyY3uait0dkKshJEU2yxCO9PqQs9nqCvSNkIpWG6eHjg6zBjxLCLPKrw5PdxEvv3TFxV8M3lNZnW/nuPw8VqpnB3qfoN9CINr8XuI/rsPlDDy+lhtaGxb8YLUeqdYJ1lD5oKlja6VuLj2KUkcW3Oh8TWNJ4xvQXbQfqPZqwhxdG0qig4XcZHibhSxE+fOcmrcPxQkNaoKS07s5cRWHzFmRGd3aYlCWhSJYUD0MxoTKZ4I8URKd0zpDTHccQrv3JW+e1dDFy+S5sW1D7dy02d66DQtPgMqgpQJ9gm2CbUzoZ2QHNEtILvSi9Kqs02LNQqVjBMVIPhWYoeZciKkiZg8cCPNik4dPRU4FXowsjW0VaiZpjuVjLu+C6U69F1ro9Xic9nmXRQYGsyNdtXF/seB1u48NfrN28BnX0x8+b2Ft+8mHh6Sr61RpHqH2ozLtXN5aTw/F56fV7Y983yt5PbM3j9wKd+w5yvfvn/h5bnyslb21slRaVPEzhHuoZ8FW3Dm5YG4JN917DPDrm7RVO4KZaoUCrkb5acbSf1sF6k3jw88PNwzT2+J8Q2n+UtCuCPqo9t6WKA2Y5JEIhCrECrOyItG004xQ02xAHHuTHfG+W2k90g4BYijm6pQAi6WG47KFvI4W4q/Qb2T8wmRwBoWpulE0O6xD+YprzBa7u7+gL3LOA0PY9MBtfUhVLwRGUbIoGoghjgEqZ1ymgftu40ubLw4bl/OESBYBmyQi9NYW+uDQGHU1qnFH6fpa9HSW4T4p/MoG/MJuzU9MrxEzeS1KxoFLXRzA9TmIXcahTTpYMcFUgqkKZDmwDwH5kW9W5o609zBCjU03r0VTjFyvyTmqCyz8eXnG48PTzw+wCkpKTwxTasz7UIafnbu2mw6WJjWkFaxvNPsA62/YPYtRgbZb8Xa2XCV2gql7K4tqxkJlVyKbxRiUD1csueGB8gKoRqSDao36oj/Pg4eRHotXOjoiGf/91ogiNGsAxmjuA8l4+dHjEwYQ72ggoaJSRcCE/TgjvoFYnLauTaD4cTQmtONt1wppbKuF3LZaC0T6kZqlRYLLWSqVHQOTEmwKYBUTCpm3yJ6JaUnrFeCGn3EsagoU0rMMbBIJImSEELqDhc+diwWdNppqXLaO+le2XJizwtbvqP1hBGdAq/mbEYRJIQbdTxaI/RGrJ1ARbUi4XAoF2gR6zqiXZrTvFPAggAFQoawOYu3GeCMwhAmgnnKNs1NbSV1ZNoRVqxd8TycfHO7gOFtKQf1uzMFmMehZJoNmTqyNGwqVK3EUmDfqWWj1N3d7nOk7ELZhLp36u4kp94c/ldckH/AxqiM5+42WSK+th7eJd58nvjsy4n7u8gSR+6bACZUE0oXth2uW+Plknl6NrZNeLpkcnth7y9c65W9rFyuO9ets5dGHpaDNRgkQ4cbSA8uCxhHZD8cBKMvij0okgPpy8B87Zy/K6QTlKvx/sf/433+Z7pI3d+debi/Y54fiPGR0/yWoHfuQmAR627pES0SuhJmh3+kdix2dy42b5NNAzpBOsH8oOQcCLPeID9rbvNfK76pN/HTGdAOnYVCLRmVTAiFkh2ici81d/6+TaDNC88RO3EMqj+F7jxvx2EyO/KCRD0PKUZaaszLNGZVn8yjgGNHtGGeGYpSa0WDUFS8SKlQm6G1o+KwgUq/PXaIcoOqXq9xP/IfFGbMNw1HcQ6jXf/o8yhDpNPDGKsEISYlJhfzxjRmU0mZpuPuRSqlhtJIodEfYInKeYqcJmGe4bPPdx4eAnd3zuyL8kKUCyKeV3UQQHpvPseRfiOrYJluF4wNeObwgTz+LIdgG61VSi03fRrSkOI6NFCfWteOVWNGCAa9evco1eG1/rt0Um7qasQgh/MPLYE189caQ3pHugNL3eqgdx+5UeqbqKi7MuhElJkgEbp7CvYRxeHuOa8naNdwdXJxN/3r9oFcXij1SugXJqvIlJGpYtpI+/Cii0rQ6vRzPiLshHAhRRskgOQFlMQUYYowD15rQtBx8jftmDYkFnpozKUTTsK2R/acWNcTtbsHHnJoC72Nv1HrBXcm751II9goUoNP7wQ2hR58XtNlwNl+GO1Sb0XKhlmwEAcBKw2yhQ4I29OBJWaw3emJzfUBTla6lSnXTmlHo0N70+ym0tPihY650VJ1irh4y1v77geh6tKZmoNrrXKnFc+Bs350g6P1+ASqcDjdYQ9RFz6fHwL3j5GHN4nT4too4YD6oCEUE3KBPXeuW+Fybayr8fKyk9uV3FeudSPXnXUr7NnItVNNqAhVvaMjQQ/mReqAVgQMf796GoSKosR3SnpWls+ElKAsPwdw3xfv3vHm7Rum9B1i+IwUvo/ICeE85joN219I4tyr2DqpVkLZsdBodGpx1b7IRFgSQT0csRNZHlxYahppPVCasZfOnjspGhL2Uai6OwVJA7nSqtLazDz7INcdJdy40UdKR7fi2hLrMiAbpbdXLcuBTTMKVJCIBWGeZ3rD9VgityDAW37PgRhJvxWpnN1VfM+ZkkdXlYf4tjby6KpaPp5b9yKlEJL/ThEGFGluYDmG8G754/eDvRZUbkWqDeZg60YISkzRzUOnyHyOLHeR833k/BA4nwPnu0aMjZgyIWSwwt0kWI1YnTmdGtMEj28vnM9XTiclhK98k+zJNyMTavHARApjk8KZkAMWlZulT/sEBhNKEfIubHtjXQvXi5NXtr0M5+w2KNoGbWQLmXAXlCSBqbmxbGxgB+X+gFmPt0acJj64LfRlkBcyiONwNNvo+Nyn90jv7oigFom4q7v1gKUTIZw9tLEGTJyAESKEaEjxYliKse0r123l4/W/sm0feXr6bbb9I7m8kOYr89IpGBV3BesB5tkoDeLkG3CTHbQyTZUUwujeFjzIZeG8eJE63eZghidR7zBllrAznTbOj4XaO++Ksm4T237mcvEQxH3fhhVWo9V6e8/sBjZXpGe0bWDRoXGdhp549oiZ7sLoUiFnocWGhULPK6QLxBdCnZCWUE3ENKPT4pCdW6qPGVBD2hWNL4g8I+UKZccOwXp3eF20e8rvXJnuK+f7xjwr0xmIhR53iu4+F3vaaNuVrTxz3S6sa6FcE9s1sl8Ddeu03N2308ZBpY+//0bZ5IZ6CJ0QYTkr775IfPGdie99ufB4Tt5J8Xr9FYO9C9csvFwd7vv2feFyqXz4uFH6lWIvbPZC6ZnrtbqGqkCJgaaezm3RsOREHhfbHXbOhqFuwxYUuQ+E0Ln74cSkynIBuxjlpcP/c/8f7vM/00UqhoUUFlI8EcOZoG42CmkMczuiDQ1+0grqsR1Kxoa7p1Ex8XmO6JgbzEacIUw+wA0p0ELAxDzts0IuRrDmpwatPv2WgHVnLNVx+j4Euv0Vhxs71WGLdCRr+mBTb4DFT86exHABJ0qM3Tu0DtbajZGHfOKyd
piOjlA1fy1ex9lORxWaPz9pvAABAABJREFUNqoeAuNOk9ciNeQnaBrHf47IA0PGY5oa8ruKlGcljWxYARGHXdQMDZ90TLMyzcK0CPPi4sNpMVJyyETVSRNIJcQxXIn+vdNkpKkTkqERRCqCYhb9BGfuj+gZSULrcqPr2pi3HY4KosHnOU1duV+9UJXc2HfYt0Yuyrq5U4hDsS5cVSJREkknTCfMvKPw1OGf1J3aENP29vq5jn87SCt+N4d4WqNT6b0MWPjV+dvf6YCqH6KUgHR1bQpCyUeR8hk+Crkaey7s+cq2vWfd3rNuX7ONbqralY4x7TDtgkaYBgMrROhizkiLbj6cQveu1QzUX3+lMYVK0ugiXskIO3AZnYAguqOyIbojPY84DTACtaYRV2+0prRWqYP9eZNZDGTBxcwZ2opZp7bZr8emuHg4UYs4RbxUer/SwxUL23hOGWnByQ6Hi0cMr52UN38exB27E3niYDnpgVQwWuNj/zA0GWHyu84dnczh5uC6sd4yrfusqZbqsovddYI1C70ovdptXnk8ht36tmPOzNG4+NUwoPTTOXA6BU5TJI34neOHDH/6R2JxrcaeG+uaXZR83SjmM7IsldobtXUPt5Sxxxz7TLcRN2KQnbzhHCobETT+zGJQJAX6XSQ+QnrXsdkoyYD/mxcplTMxPBD1kaiPCHcwVBAi1RN044bUFdUrQa5ErsR+dcYMzVEeCYhWT+BMRlqEdO5MZ2U6RdKSaFvy06jBWiqIMU0NDR7D4doFxbq7ItTiHUsu5eYM/qolOkS9I0pCfFFEjQ6JWAcLr7qio41hhL7JyGeKmRiUfqS+HQl6N15Vv538NUAsesu4aq0RQhvx3Y2Yhni4jM3A+u3ApgHvyujUASu6e4HdmIAM8efvjnfowzaqD9QmRGVaAue7wDwr9w/K/YPw8Abu33SWpTHFBgzXEMuIZTfPHUnH02Sk2L0TshG8R/VB+YhL6F2pJdK7UkrwOWLzAmQmQ183olcUrCutKPuWyJuyrrCuhZfnlZcXYd8z13UUfQLDYphJz5yme6bJrz/VE0pyg1a7Aby+QYyZY8ue2FDGbEo6lFXIm5F3yLlRWqP07PMoyQeqOKQJkSP4UCWCTGARWqDtDvXdEozN9Timxt46l+uV6/aep4+/zbp9y/PLb7NuHyjlhalcKdUIM4TkcE1Mw5UCL/axK3Pw2JAY4lGOgXEIwUhaUTLKlYNZ5w4HE/DRcVAKIhfPeiMTW2fqQpsmqgZEgyfnDsd/s04//CDtKOiNllda37GqNNupnCg8o/KA2cS2KaU4AtLD11h8Av2I2gvKRmBCrDvxKHonHkmIeTJlREmYz7NlKALqILzghz4Rg2GZFRYIixEWQ08dnRtMHSPT+kbpK6Wu7Hlj33e2NbNfKvulUJ4L+arUPThUe6CdP9GD+7xabBwC1SFgFWGeldNd4P5N5OEhcZcik4755fFsrVH7IKA2YS9wXQsvLzvPzztPTxeqbVTdaKHSxb+/C1j0BG1icAh5B64Gz65p0NjRk5PQGEzfjhI00JMwvTGsRPoPFFbYnxoOtf/ht5/pIlXrTC0TKSyYzshwYPa30zc6UW/R1Z4J5YWwvxDWZ9SckuxNREBTQ8LAlEMkpMK0wHwXON1P9H1Ght3LdXfmzV33+UlKHs0hWjHxzbX3jdY2al1ckFt8UK06tO7i85nQnaIaLRJjAImghqoNo9n22mWpz1liKD6XSpEUdRQVblEQfYSq+emy0rq6XYowTrNGrT7kbq0TY3gV86bXMLvX+InuGLMBqnR39fSYiC63SC/5BO67IY8j+0fHPMVPepG7+8iyRB4elYdH4eHReHioTAvQC61mai60UjArKMU7YS2IGLW503TJSloFs4RZwEriSO5tdcEs0tric46u1O6Gms3CmG8YKPTmruPbUaQucL1uXF6e+fgU2beN6+pAhmDu0RgW0vIZ2DtU3xDjAyncMYWZJJEk4pDfKEx9zBe3zdxomEHp77A+uUXS+mxs1ci9UaTSYwXJ1OpWWscQ3QZN/4iN730QI8ZBSIdIuGSH7Uwht8Zl27lsF15ePrJuH3h5+ci2PVPKhdKuNOtM5860+IA+TU7uURxmEAJ6PpFkYpHgmzwJGK7sCIHqrysV4erdLe/pFihtej3ctOE5WPtt3uvJtsfsNQ17L5/H9N7GtdmpZcdqweziTiS90+sT1mewE9ZP9JbYtug6xCIwfYS+EuIHlCuBjR78UOEaP0WmRCKiQyfQumcsxTyRbCLkNPSO4bVsqMOqIeGO7QvoZBAbXY3SK7Xv5Lqx5gtbvnD5eOX6lMkvnXIxygX2a6WukV7slpp880Q8QiRvD+rXYQwDOg7K6ayc75W7h8hy8iRslaPA1ePYOshSDBMBo5bOtmbWi9usNSn0ULHZxximTlqJIaBTosUAVbEr8L4jsSH78OYc9SnOh7n12BDU6CfFHht8J2K7sZ9/Onrfz3SRai3S2oTZCCK7QWhj5VNBVoQVsSvaroS6onnFrCLW/ASkx3lw2BjpjIZGmCDNrpvap4RVo7XCPgS4aSTuCk44CNKp4gP41suw8ikjoNC7FhgQ0+imQgh+0hgZVUGaw4/iLK5mXpwcjoiDdRcG6068mHUbcAivRAHrdKvegndoLWB0Yg8e7DZadjek7agOFmE8osiNI/33mKOZeZCcU3vHgFQO0oQXqRvENYxtZRik2kg5TUmZZ9dunM6u5zid4XQ2lrPPmtzs1oXNDpe6H1kP3v3JmDUZQg3qFjM9YT3S64S1id4Tvd+BJbqdaZY8M4eJjo4i5TCN9T6KVGAvkZKVnGHfr2yrsF4r6yasq79/KkJKE1NcsOkBuEfk3udCcSGm5LMpFWQYxTZ3MHJYMcurfskn2eQRRJivRm5GNqMGh6NprpXpTW9OJIde7hbte0RgZL8GTJ3+37q4a3cwcuusW2HbN7b1yrZdWdeVfV8pZaPrBsENX/fcidnI2TvOGJWUZ1JM6LAVmiQ4zOiCw0/W3+ENN64NZECuSi7BiR1N6dU73Vb9ANH7EU1yREl4OrWGoQc6DGv7gEHJUDOmL3QpVLtgluhtotWF1gJbTrSqw2V/dZivXkB3JGRnK0r3OhAUxROk1QIeH+MHDY9HSQSNQyQstzmjDMadJoiTEJLDfk4UMboVaivkktm2nXXfWC87+VIo10bdjLZ5jlkr3aG+/vra+YlPcSD3+PqA+dQZngd8vix+nyfvduVm7usH126dNpAQRxQYMTuFffNUha4VUnNqagQdjjlBw7Cl8veqb4K9AIv5xX3y0YAmSMn7awnqlsMCfRI4BXgTIBtJfg4cJ2o9U9sdvZ/BTtwcNXGNiZExnsE+Iu0Dun9Etw+El48+iKSNizO6vczQKIHPY6YzLA+R02UhXwp5FfJLpu1u7pnURniYEKUSg2JWaLZDX2ltpdaZkndyyeSSSTj2HYZNEiEQUwLpTD3RGoSu9BZGobBRpCJR08CUK3ObqC1Tl4k+LFR6G6fMWm/hiFozoblbtRYdUJxSayPEMOA6
t2E5El7dHbx78q81n4mYeDdWO92cFXjMT4Yh9evGe0CEwBEqh/lFvizK+T7yZggN374T7h+MN+8qd487MSpcvIPqLbNvmVaH0l6Lq+0HpBhILtokQb3D+kQrZ+gL0hdE34AsiNxjcvJAu3QCjVhMnmpLo1keseKBXJWShZKFvD+zrl/z8hK4Xp/ZVp9dxKDocqbbCenfJfAlST9jnt6wTI+c5hNziKQA7MOLr+BBeBVsc4JE232UaRX20UldXmCjUaSwzxmxnRA3n5FVaMPB/tWB3A88rbsJ6X71Lq12iDOkFaaMz6Sscs0b1/zC89Mz6/7M8/OFPTsENfcr1SrpXIiLO1yk5ISNIInTDEyM3imiLAgLDrEnfxDABWIVWOn4OttqJRfj+Ql6mbA6Ye0B+gScQTzfC52HQbPDbSbilw9Gl0qzQrMCW4G8UWyl52+oeiFLpeHFsNSJ1gJ7nb0I1ohKdaf7dCVKJ4nRuEfDdNN6BQkEjQQCokNXaEpKE6W5fsujTSPNXCyschQov7ziZDB170ZEaC2zlY3r9crTx2eu1yc+/viJjx9eePl25fo+s6+N/JKw4oGj1saiGr2Uz5sHSvSJq0xUN2s+nZT7+8jDY+LhMXE6uyXbq0Ngo1pnpbM3Izeo5j6L++5xO5fnjZenFWJD5kaMRhBBZyWkSJgSMiV6VKQJ5eK+k2LNg2EPvUUH7fGWNjD5q+n+vwoSDWmw3f8cFKkYndWk48K50doMh6roY+g8hs+tjtgHP6X3QXyQYKhm+mjzzSJUQWnEAGnyWAkNkW6B2pRelTKo6H3wsUU6Ki7EDXZAU2XMqTxJ1wWmB7FgDKVVsRD8lKqCdqfEe5HqqHp8SAhpwHmBqj6QLCqfOA40eu+o6muRUqENHnsRdSy7C6rV5zPd/Gd6H84Do4syd1I/TlriqxUNY6B825AGPq5++pMxgHG4R/j0drhNpORGr/MiTHMjJkWlDqqw3jbiNrRVtfmQXIM7GgRxiEstITYjfUb6A/QZaw+I3aGcCPEtIgsSHnwzlQmRUaRChOA0a5XdBdHm4tGeXGMUYieECyIzIjtmMy4liKjcEfTs93ByK544EVMkzS5OnoJ3m03d+dvGaEZ0MDH7K4GiNtyUtXUqbifUuhMLsEK3OGZhA9P9tIvC29lujdr9miwmtyBNbeCi5oZTDxTVyTdcmeiWRqeZbgSTbgPaHK4u0zQxzyfmaSbJQpAD5nulN8NB0nF+YLORJm2NXJyM0qu40N1mhMVfW73z56IzEvz9EY8F9j0vuO6mS6VaplnG7OowaJmoU6JbQK27ewdj4xTDgs8fqxTUvEhJ3RA1gkIPBZOBGPDaothtvx2BksNP81NxfR86KR0i+8OU10ky7rsnZq6t3At5y2zr5vfrTl4zeRukiTxiNnoFK/h0b7iNHBYhQ8zta3TMqKPPDZdTuN2nqKQjrPK2JR5+j+Mg6iwIOuK+mcO0uuQC3ZGVUH0Odbwmt4y64N2bDeZG6IZWJ0+01agRaugOZHVPUu74517QFYtg8+uc7Q/d53+q7/q/6G2e37Isbwnh5Nb+DGdx8QGNDwmLM+3Kax7TTSDXvGCIdrRtaFF6ETqK7Uo0SBGmOZDmRM0NI5JroO3KnmGfXT9luFgxSHEYj0wKmai7z6fqRsm7Q1/9OB35c1UNREkssnhxGTSvI95DJHj3JQnA6bY90Y7kzqHnqQMaK7W82ipV9+yLEimxEnUn6NBm5HzTAvkifF0EvXe0QWuKiBcL6QDdI9F78PTR4bVzzMRszAwO0fINnxAv4iHANAnL0jifjNOiTNE33bL7/C1v5gu3+IZbx/zGuhc/MUV6hHbyFNV2JvAFwhmVd0R5BL0nhXdukSMPaFjQkJBpdpZkCljMWKhYWB3y0I6cu4tpd/c2nOYXYlwIIRM0E0MixZkpvmGKZ+b0yJwemKY7pnkmLYn5rCxJWCIwC20H6TZyxsYGcWxmZnQTSjdKdyeCzE61K8WuiK2YbTQ8DPPYSMV8E5LBqnR4Nx9EK3IXmkXMwvAPbHTKYKBNpPmRZI0QN7dqajK26TZQBmeDpnliPkXu3yzcP5y4O09Mwe2H5DgaI0Ad77s/TrdO7pXSoDSl7IlWAlR3dFA5EcJbRGdCeEDjCY0zkk7IIE6YjgRehS4O8ZW+02wnhELeDOyC9SsSwcJGbc426xGsCFY7neZfp6C9YuUKmANommkUenwtVJ/+r5lReqfUfNOW7aWQa6VUbmhcazJmhuIMS/Hr1cQZlduauT6vXJ6uXC4vXJ4vXF82tksmr16oWslDM+mMXj8kNAhuhXQUqn5YOZnrouaTcPdGeHgTeHiInOKhjTpuIy0aI48Zm69NhzRrM/YRfrqtGZ2MpNCr+AHnYHCpINHvMXrBSsPiKXaw1eNs9gZUJ1DEE6SEW6BhKEKY/FizLT/dPv8zXaRO82fM6S1BFw46yasmxWcNZW/k3Mi5U4qr7UuDXtwclQFXaXFnbHXiFL1EKIGIMEchTg5BdIuUGsk5sudCLsMQtHViq+jkabZoJsWNqBPWHfbb8hVEiLGPrsIjMEJwrVYIciM9cLMq6oM4ocNpe8wcLN2KsEfKZ3eV6NUjB2wEwg3fvxRmSilMMZPi6popXanNf/bIkcJeuympOP3XXgecQWzMnoQ+ipMMarSNsEP3Baz+u7ovVugQfHYXY2eeG8siLMlQGQ7Tu2uc1k09Sr3ghAfz2YpvpECNWJ8g3yPtDbRHTun/IIZHpukHpPDG7bH0kSATQU/EOBFSIs4TkvD01rBhmunx4q9hKiTtlKUTJINurGXm9DLT+kSvC1FnlnTmbnnLMp85L284LXcsy4lpScxL4HQH59k82rsYLQ+dm/qhp2QbmxrDdcSo2qkhU8KFah8p8pHCM8IL3a40S3RLrj3xK30UKhf5dmtU28ls7DR2NSwkJCamaUKiowWxKdbOnOQ7EGfWDNkShQkJySUb2keelHK6W7h/TLx5t/BwXljmRNDTIEkkXudPG4c7xlY7pcHlIrSaqHVC2yNiC6E/kvSOGM5M0xs0LMTpHgnzePzZSQIydDbDCf6A74tuNHYCjaQREWfaxv0OXTY/bNSObp2cO6FvPieu2WfDlrG6gTk6MOtGsEwP3eHfYaILDWs4fT031n1l3S5ctxe2ffM062IDhgvUrLQwwibHNS/Fi96+VdZr4fqycXl+4eXywvVlZb1u7JuzgEtp9Oabl9D8fVWB6H6KEtx9AztMoL0TmiYPNXzzeeTxbeLhMXKOyiwH69LXaAXKsFxqzSUaOogNZlCz58vltRK6P5aV4G/xQIpMfA1LgjQrTIIkCJOPRyiQL0bLnZzx9OOzEeaOJmGag9PkkyMJu77uK3/Y7We6SKXoF7szi/Qn/u3wpGsj66ZWZ4S1xm2D6Ad0jhFax2rFomAS6K0jbSJIuKX4alAMpfVAbcEjM1qndnxzNo9lVnVD1DDmKJgH9tWaqXFCROg9gkCQI8bjGJAeQ3Ebf4d3JZ5BNf5GAwiYNdQCvVf/DnGatX/vT74OYupDXwI
YBI3e1o+q7s/JF5WOItXGyd3dk0eoXddxEPCfMQMJDl11NYe0xNlyByz1anrrzzGoU8hTElIUGJlapRi1BfIeKVVuVPJuSuu+sFoXrCjWIpYXpJ6R9kCSzwi8Q/k+Qd4S9YEgZ4IkgszEkIghMqWIJJDJ6DphutMjNKuEkJHuqae9wp7TEB07kzLGyBQmprQwpzNzumNKJ1JaPM49BnfQmHBm6Aw4M/w2fzoYW1K5UcNNoGulaaHrRrWVxpUmQ89j+XWGenRSB+trOFGYdZplKleqFCqVoJMfksIJogytoGJhYuoPNIM0XYk5E2obNkQNDf1m/jvPZ5ZT4nRemOeFFCMiiVtK83G9UjHLtL6Si7IX4XqJtKL0MjHpPUHOTOELop5J8c6LVJyJ090oUAk/Qchg6R4Q1zCpxQ9zzQI93iE0an+kth1CoMeZ2jpShy9faISroRXYy825obVKtUYYSEMTRxu6NLr6R3D4u/ZOadW7qLLfRPEen+NLtquODsq7KO8nvYAZRtk7ZavkrbCtO/vq9POcMyV7FE9vjWZ1uI6ZO4mIQ2ueCA2HVtH/Di9BMbnW8HwXhj7KYeaoryCscdh8+d7Xu/haHteR2TCZLm6PhrpRMW10dQdMIk4EkeBMRp0EncRnTGJOLOtepEoX9+9roNXQSVgE9yQZ0ossPwdFSuQRuON1YPt6c0y4s14b67WxrY09G3sxSnWGVdtBdnzWogUNDQ0FCY1uE9KEKQh9TiynRF476OSbQavsrbFXYS+N0oxknVkKSYUp7ky6E2XC2pVWJ/b9Mjb87rRzSWhwlwtVtwi6DUoHs8l3+YO9xa34HLOJZs78Krqx1807q1gGe/CAgow5eEjdHndSmCglowi57KgqpZdb53b7CG5gW9vQ+hzMxM4hNvabcmRCIW7C2aRiny4Tp/o5ISXBaRHOCyypU7Ky78rluVJKJDefk3SUai6gLVWpzT+2PWF1wbYHtH2O9u8yyR8lzl+iyy8Q5R2TPBAlEURJ4gs3JeF8EnRyLUuVnSaZGhLdMrVvLNFjT1JSTFYu28Lp/UxtEy3PTHrinB64W95ymu85z29YJi9W0+QFbT4Jy51wPnlhqpkbpRhft9gwn7Vg9NBo4UINF3J8T+7vKXyg8ITygtmVbme6TdhNnI1vIANzNWs029j5wM7KLquzVONMDfdISO4kborYHbMKXe+Yr525CZVESAvzqbGc4HSOnO8Dj2/ueHxIPC4nUkhuxzS24XE+x+GkPGyWNp6fJrY18vRNwtoZ+iOPd1+i0xumhz/CMt2zLPeE0z0Sk8+fPhG339Y3wwbkdncNU2BGQnVdYQCJkVSvhLJTe6P0BuuGbplrDTS9os0QuWJF6NU9GWmNnHaUTKkZJINmom2oOcU9550tb1z2Jy7bE5f1mXXLbKVSmt0y62oVapHxXhuteEfUrbNdC9fLzsvzhZenJ16uT1wvL2zXlbw7Mai14RRPQCggw7k8JkIKhKgje8votQ10IozcqMjnX868+2LizbuJJQbSJ4f2DuwGe/NY+1YCVv0wa11p1bV5+17ZV1/7KRpUQaogfWgYaKANCUKajTjo9t2GbdzmcoLWO20yLEFdGnKnyCKciaQuLLM75RR+DogT3j3pGNYfW7vv4q2759q6Zra1sK6NfTdyhlKUVp1BJ4cQ1kZK7BEqhhC10ZP/xmUW9iUwLZFpzKdEOmZ1QAJGjL7pBmlI8sA3+kGc2Oltp7UZVaX3BbtRbl0D4X/Ba3Kq/0HH/GLwvY+/8vg6x8DWRcFhDFqH09+t2+mRQTl1Q1mHCrIPielI09s86YAIY/DTpGrwYLahDVEO7vRRUkc3ZubviXUfVhlYHwVNnLp9BAB6uslxYh7kgeEE32z47A1HCN8ER1JpgbqrL7KcnMnHCeWOoPdM6YFlOnuaq0ZUxckvUUjjMVXHs7bozvHd0XJfiDqKqfs2Hplar3/ruOaIr3cb1kD9Va/UqwcP9jLYfRlatlt+1NFV+clZhh7IrXU80GlAU70gPdP6cCnoB+NyzCyPEewxvD+cKeT1edRybP9+lbnmzVmRaZo4nWYIM9OSOZ07bz8z3n0eefsu8HAXOM+BqDLSro/i5L6Co+S+kkosEeRE0ollegC7R+yR0/lL5umR+fwZaToRphMSl0Hn1k86s4OzWDErmFUamcMR3iggxcXbdDQEYpwxMbo6eUJ6o0lAwsTdXpHgkfP7S6HlQF1XpAWomWpGaYUtX2jyQpMnJD4RiNDuKW2jtJXaVmrb3A1/CN7UDhj+II8MItKhK4BhEl1oLbv2r/pHnwP7eyjwik4c0pnjUhwJ4zJskA6ik5kg0plmYT65Nup0dvass19/Yqd0+cOhpavDh7T83nv7JFOOPjwkW3dfxHowfMTdtgMQdEg4Om33TszRJZ/PVXFTABUl5YZEpWeBJPT6c9BJvV7cn9zMSQy1VXLJvDxtPF92Xp4L14uxZiFndeVzwb3Xhk+O4Ear6ggbhILoRIhwdxeouXN3l1jPsw8Vg9Oxt91Yt4ZgLKG7eeSk0DP0ndZW78DqSqvTYNw5ldpCHO20vUJ04/9etTB6K1a3bxsMos7QVHRFbCwY9YKmcvM7oAf/PdaFlkAl0HodQ2ohjsXncEijNk/cpUOWQh+LRYUx/pSxGMetg0rnSGe1rq9ZPcPlIowcqRg9buZwV8eGRUsxSnFGI9EtrQ4OUu8O2eYNyiZYDYR9QmQhyB1RHpjCA6f5DefTmdO8uNvDMLuNyV3Ho45XpHuRohutB+/a8M0dXmEWP+CPMbrJgD1HKJRFpHuBoutgJnrmUA7uJ2IZ6gb5AvkqlKtDf0dUxtASO6szQIyGVN+g+xCFW12pbae1fRSqchO2inmn7f6PCi1gNdFptJBoIZK3QG/q/34cCsytX5dlQuLC0s+cHgp3943v/RH48kvl7Rvl3VtlSRCHP5AXkG1UWR/A+NfuxrFImeMjYV6ID28HC/KB8933SNMd890XhOAIwk3oOa7649iDdTo7nSvNVjIvmI3Zlzol0sg06jBVnb0QMROtk2joNJOWAhpY7s6c7hcuT0JZL6wfM21badtKxbCWYf1IJJFE6enHBDEC9+x1JdeVUi+UttJ6HutS8GCO6IGmYwLk74nPggE8imen1p1SNmrdKHWj9zL+JofNVWQAQqPdHotN1Fl1w9F4zNrd7kJxK7G7e+XxXeTh0Z1cYmCIePuxm7iEZOg7S3Fnk2019q2Td5/fleyi6ti885duzuptDYq6KnxzxKVHwSx44TP356yr/47SOrkJLTnLNCoEgXlzyY5NOPP05yOZ9/e/mcG2ZV5eNr59f+HpsvLxkrlcO3sR8p6gjdN464Pm7fiLWCPI4Shc3YEbY9+U3iIPjxP7FaQHpDfMlG3vvLx0WjHvpBT6krG+YT3Q2+pYu8zUlBBxjZeqIrU6rRvfoA7SRx9xqda5bUQHrHZcqNY7bcA/vfVx0g+3DkrGSc84ikQgRmXqo0hZcwFxVErNw8yzUbUiWim505o4FKpuGuvInSvRvb54kXwtWA
q9D2GmvxnOahOmwQY6otQR9wPzUx6UKj6MDp1A8yKuwMg7yrmzrUa9BqQlpC6gJ0I8M+sdp3THw2nm/i5yWl5BYLNPknEZnUcDq+LuFCVQLVDF4TfT/ko9tiPKuw+HhO5N4tj0rQU3Bq5+Qmwi5FUIHchgmxep9b27SuwXOMIITZwxqfhrG4N68e6G9kZrG1Y3mqzkspLrNE7zJ2ovxF4R6yPAMDiZpJ382u5Kl0QhselMjIGWwujWgOB6udMpcIpOsHjzxczjY+MHf8R49yDcn+A8OdvRrYwOksTqDL5e6H2m9wTtDdbOJB6I8zsXGd69RfSMhntPz9WExsW77d81Qz7eqC4rjY29v2erH9jbM2v56MzZACmE27zMG3jxWatElD6mV52olZYaU1woj5mc3/D8OLOvL3yYGtvTM6s8eTRGy+TtG4JVAjs53pN0ZUkTW84+i6ofKXWltYx7NgYm9XiPqB7t4bIMh8uQ4aLfC6XtoyM7ujIvUtDGwW28Fr3eSrWL9/uwGVMfEwCHPZTgB5vzSbl/iLz7fOLxMXF3F50mPgreMZPqXamtU6oNP0pjuzbWa/dRyNrZ904t0JOfQbQboXdCrUgBNqf0W3WNXi8ByYE66PZ5rWP00chNaZNQRGiDAdhX7wYlgRZB15+LTur33vyNNEpp7Hvhes1cr4Vtrez7mEdVdb1KZ5yIPHaDMbT0jkqR4AamJsayCHkR1yIskbwbbYsYjVKVfRfXRIyW2enYA7Lomd53aLubS2qktUKtEdVKa8EHsD3cIJxWX13GD+ub47TpcRN9FKkhQh7meD5shhuGAHzyhWEdNQTFIWAk347NaHIM4XGyhQREhi3T6J5eCSrHUNshGn84eZ1VmTiEhuP2DvPJLdjwBqHZa5HqQyAs6ktLtI/B9NBujZNgK+rwZPdBu1gi6swUEnMKzJMwz3LMmbmFN46iYONxrQ7KcPY4loZAMncJ0INdOQbVh6sHDPaWk0jojusf1OMehjcffujvq6c7uJuEf7y9JXpcsGMjkiOPy0C66/twvLC27LDRofsz76TUhtGwqXd3LY0pOXSNdI3UPWDNYWXV7vETuAA1TUpYlLgE3n4WeXwU3r7pPJzglIykRzJwR6hjbrK5G0gvtJq8i+93iD2i8hkhfI7oGQlvEF38c52cESiBT8GP13iZ4Q3Zd6qtbPWJS3nPVj5yLe8R6aSozGkiEpl0QgiIRCD6evXe1WemQWnaERVqT8xzwiQTkrBd72m1se+ZWiq9dkq5ohYIpmj/QNOIyocR01KofaXZTrd6QylMlDCCR3XYh9xsYM3oVodG84Bq860T/t1dlKkDfTJeE1EbRs3HWjkIDPjJFXWT30mYF+F8dheXOalDfcfFbzY6Z4f7ahuIRIG896HR6j5Lqgcb93V5q4F6SqK3Q9k90CyqP18b9PZmw7V92C5Fo8mAmouhqTvhKXfY8STj/ee1SA2mystl58PHlW+/feH5euVl3VlbGxh0+GRmYwRpROrQYjBOQo00NeLsprO9e5fy5m1kX31TuNhOK8ae3VKgFWG970zRZytadkwElZeB7wpIINaKMZGS+/n1XkkpApUjT6qWxhHvfaOgHy3/IDZgfURm+OnKbtP5A+YKg8rrhrK9d+oh/B24NqPwSBjeYKJjZnHMLz45+d++prdFIKMoypirHruvEHyGEcyDDZMXjjR5oRKOYgytujQgVze+jGHo3eh+8h9PoFefJ/YSCD0iMhNsIcmZJZ5Y0sJp8STk0zxGYx1KGbW+g+Ux9jfIVShVyDnQPDoPm8FCp8URClmPhNRGrUYK+Gmyu0i0t+Bd1HBPV/G8LsniKOAV6uqd1PUj7NfR1UV3KDDx10LNQw4OuypPZc60ttH7lT1f2XMilyslnah1p/YKrRMag1kW6XWhl0gvAy0oAXoieF65u/zHzpJ8OH7/KJzfKqeHwPe+H7k/C58/VpKODmrMfoTCzUnCdmrprKthbQZ7IOl3CeELYvwuYfrci1MYLhI3UsTvu2IxvOOoVniu37DXj3xcf4vr/mO28p69fIOqscwT52VhShPGA0FnQrxDOY9D3DHjg4gSREjhhIVOZ0G1My8Tua30GPyU359o7KyXC1qrR3/0B4rudItcd+OaO7lcKLX5bEwc6lP1azkOMhEG3XbXG0qnVvegzOWFUl7cH7F6J9WsYmIDig5oF3e9wefCQV+z2X7Cdbx3sOYrTJW7O+HhMfD28+Tzw6RD7O6v7VGoWrMB8xnXS+P6bDx/3Hl5ylyfC/vqxaoPw+g+uCrSDW14rhVeoHtpLvgujZYC1eTG6rMjfDWYH2uSIrshqrTVNYGIYRE3p/0pbv+3LFJmnX3PrOvG9bqxbru37Xa40eGuxzKCu8SohNtpLOCUbdTjCmLyTJ3lBOd74XwvlAJlFzJCzQf1WymlucNwhVDdcFZZnc7ZlNYTmit7jqS0k6adUhdSipQ633z4yt5un8tg+gQdtNTbenf7IncvHzHTo2iJyM1zSxDq6Lbqp6LfttPMT8R1CIA9wdeL575v7uSeR+xIq4Oea1jotwLlYYLcIIgQlJBcRBUUpkm9SC3ClOSWgVUHgcDJLA59+wKR25BXxszt8AAMaoQU0B6ZNDAlZ9RpVCS4Cr51f/27296x72N9HzM/G0WqGbUbpThM18a8xoJ5yGFu1NJGqrCNoTg3xf4RnXF0g0dRbNVFoNp8/lQ3qIc90u7fr85AcehwkFt8WO2zPGv+e+oQ19bqwXhl6OHq0MS5i0IfG4ogLSDN/8DRUzjTzIweOyFVQqxMS2G5Kzy8Kdy/bdw9Nh7vGqe5ErWicnRQB8w3XIRR4DwcUCIi3wV7Q4pfovrOXT70hOiES0M+ZeyNrXMc1Q8iSB4JsHtdedr/O3t95pq/ptp7Wn8iyIWgEENx4oa4TZdZpdejQCTqAcdaHcQbkMkPVCLQQsZSI54jcz1x6oVeK5qUXiu2u6dc50q1yLp/xZY9HLD1gjG6GgljXZXx3+Igoym1+bo0GrVkf9/KSqkbtR1SghHrYYIFIQzdIXrMPjui0e9yEJDGKGAQvFTdN3SanNR1PilzVJJ+yrs9ft9wbqmdnCvb2rheGy/PVy6XjW3Nt7Tu4ybCMN/mtSvDXU3oYK15RBh+hHGZixHx19pk/JsZoQvaXJ7Si1spWRPa/nNWpA7YwN+Qxra7V9bLZeWyb1xLJodKl+6akQAa1YO7BKpFj5W3TjBcOCsyMlqM5eQiuPtHuF7H5nf1zXS7OjXaupKLkIu305ob0TLSrm42GRrsgskGYoS0ENOJu3ximhLnvNzcofNeB/zlDCZVT+MN6qa0emPGmUMX207e97F5ueO5Bh2CYaG21yLVhpN2HbBR6/XWiR3FqNbGtm5OW9/328/dYulbf72IFRgXp+rxmrm4OgVfRNPklOx5ckuVNkgQANvmZJbWoHVBhraiFSex+AzaT45TBCwSLDLHyJwi8xRdxR6FZi6upkDJ7me3Xb1w9SMJGf+d1TXTFBtM7uCLzUKnhkoZljV1mH72wQuxQ2cyG
HX9KFSdG3vKNTRGXaFuQt2gjGIVRkEyT/xw1HAIez340jU3rXaqNWrLw/vRtTU57cMHsiC9Dksrh1C0h9c4idEBNx1FavI8tZgqp7udu4edt+8Kj59n7h8Lb+8zSRtRyg3i+7RI2XjCIneoLkzxAeV7CG/R9ANEH1B9C6RPZk6vHdSna9SsUcpGbivP6ze8rB+5bM98vH5F7S/U/g0hPhPilXm5uPNLjMRY0DC5Dqk7t99nY5FcxoGrZV9rAZ+DBT+wFS20WEn3iZOeIPlwMi0J6412BU+Hv9LrMNrdg1+bPWLmJs/cNEYTh/2amY65qg2GbL0VqX2/kMtKafvwXPRZk2PH/vqo6TgeH0SUcCOWHMXJxklLcGJECMa8CMtJuTsrpyRMKigHS5ABO7pxQSmdbatcL5mXp8LHDy88f7xwvWy3ANTbtGCgI7f1fcun82uNOlCdbl5ggYB7CcIgSx1vfce7seJn9TKu+fo/jpIC/hcUqT/2x/4Yv/Ebv/F7vv53/+7f5V/8i3/Bn/kzf4Z//+///U/829/5O3+Hf/kv/+X/hEfvtJ7Z64Xn6xMfX575+HJhrRt7z9TUsOiUyJAEnZQ4CybqPlsFaJ4vGjwgBo3uNXfEVTy8dRq7KKwXF5fy7DEPvSt7DezZh5AilVo7IT5jutH1Si47tc3s7YKGhRAXHt7cMc8T9w+LUzlbZ9+KD+qbEUIc0RwTMQSmKRFTJERn5tVSuLxcuFwu7NtOKU4tD9EHzKLcIL52wH3WbyalHuI3YMPRcbXW2DcXLW5bvv38cdGKMhb/wdITjvjqA2ZUcUuU8+LF6e7eNVKC6zXqiO7ed2XfI7Xq2OgdaisKGjw1V01IIbjGIiWiJO7SxN2UOKdEmhWNXni2EU+xvnihWl/c2PUIG8RGpMVRcFWcShvVdTQBslb2l0pem4sxi2P2DVyoPDrjI1X5VgCbD+6rM0y8OK2wXcdM6upFSs0Tj8V8BuanTKFXz7VqGX9cGjUU8r6zbzvbvrLElT2upCH01droTaEJ2hw60gpdnVzTpbqDuOzElFnOhYe3Lzy83fjiOxce7ldOp5UpbB4yeStMhx7qOEknYAG+h4a3JPkuwpcg94i8AznSCP4gaM9Zb/uIq/jqm9/i5frEN+9/i5f9iXV/Zi0fENmYlyuPj5n7u8rd1EgzpCWgqSLqs6HWZkrZydtCLcED++pOLleaVohGejcTTpF49vWCqs/hwsKyzKSo7NeVkIT9KZOfMvu3F+p6YcvvyXmi5uRG1ubejzLmhmIujjci9Ti/dV9XtWafedXM5fqRdXsml83h+eNAF3whHQGkYzw5Zlounq+DgGQHFdzc1S8eesOTcHcWHpbAkoTpgB7HrLhiFOvsxTxp+lp4+njlw/uNb77+yPtvX3h6urJvvlcddfMoUDJqpSePd1esmfkhqndM3SE/qjBFSEGIak7GCl7fuvn1LcP9P+NdWln/oKvkJ2//04vUf/gP/8FP3OP2n/7Tf+LP/bk/x1/9q3/19rW/9bf+Fv/kn/yT23+fz+f/KY/tJ+TuvnS1kGuhjJlCNaOlYyCPU4yj0JMH31kNuIGnx2EcsbSibpAYMGIyptmYT8aywzT7XEGD0FSgy3BHsAH3HV5gBdOOaSPvQmmV6y5IyIRY0NipbSJEb7m9SOWhWehepDQwTRMxRFqfmCwRLfoJsRb27LYt67qR8+6HmTCcMoSbl19vg6k2qPoH0YQh4nUNjn+fK+v7gAf7EBDKDfc/qO5+VdsgZByDXr3ZAKXJSJO7NYfgp+la/LTfqpKL+qyphxse3pohtQ1I3R9X1a16IkqUMJwgAmkKI+p+JPGO3+HDYVi3oU9q5lDF6H68EzSIvsEcm0Q3LzStmGf79Neu5AbtjTvH1zs3r7aOa72k++O24tBmrX5nwHl0RuDikYZst9/Tj7wlRjdV64Bqy+3zJm2Iufvo7saM0eRmeuovYEPwhOqYCtO8czrvnO8y53NmmTNzLASpbjN00z8NMs74vWYJmBF5ROQdEr4A3uEefguvnP2fXJUHN8JlBA4jX9Znvv3wDc+X93z19e9w3Z9Zy4XSngix8EDm/twRYI7uTKLRkNDcEaFFDKG1QC1Kzsq2euTIli+urYqdNM1EAiklksyEEJn0RBRf3/PdDGrk9YQ16LWzP69YbtSe3R+zTxyGr75llvF3FjznakDfBgzvzVKyu1PUTM4b5dBGDbsyjqn4jYbvDAkTee3Wu3BoOG0Qo1yLaDe94ZQcQp8Gg849ee11PzSjGdQh4SjFD8DrmrleNtZ1Z999nbdPzAJuKO0naO1RQH2NddpYCzog64gXyRhG9y4e7lkHuUKaF7tWRydVf7p9/X96kfryyy9/4r//2T/7Z/zCL/wCf/pP/+nb187nM9/73vd+6t+57zv7/tobPj09/QHf6ZtsKTutuJGsNX+BOngWkioaDZsUZoWThwzSnRYZaiD15Gm7U3RGixiqjRhhnozl1Ki1c773zedyN2Lfa6BLp3Rj3b2tT9GYTtVJEyqe25IT13UHmSGcIOyUNqNhp/VO7Z28Fe9eah+OFIGUEzFGTn1hYWGyRAiRUgrb/sLl+sz1emXb/YjimVM6kJcDhhhR3TaoyAxnCl6dJvrYJD0w0VmE/kM4m00PY1N/1XVoOUKAw6onRi9SUzKWxZiSkaIvIGcSeUdSCoO8IOzFc6daN0JrtG6uGwIUJQUIi3oMhkbOKXKaEqc0ERdF0wA5xv66Z9h347o2evGOR/pB5XXKv0bz533MmNtrdEnPXqQYmVxOlvhkbmYHecLhy5Id0g1a6D3QRlJu2X3udjiepOqvVWw4rKZ9uHuPYDpro1MoVMsU2R3q++Re6k6RjFKG2zgw2G0HFKTasFjQVEhz4e7+ysPblTefb3z+5QsPbzbe3j0T44WgVzxvYWCltw7KZ0twwvo7zN4Qwg9BvgB+gHdWv19x+sl16YedxsvLC998+Jpv3v+I/89/+3/z9PI1P/7mN8ntSukbEjaWBebZdU9TnLg/n0iTYJPR1EtFkVHIGfDqLuSLd//Xy8ber/RQCFUID0KsgfPjHdO0uI+cLqSYWO5P6KyYVO9sY2e7vlD6Sg1PVE00nRyEs8VnL72jvTmZakCg3Q44rI004cy2e3G6bi/sZSXnPOyU+q1we206UqIDxymv1WGthEPz3WEHVBpBfT0tM5xOxnk2zsGlmWH8zFFdKpDN2DNsO6xr9wTeDysfvn3h6cOVy8tGLgdD+PaWHZyLWx896uVoBsZBrjk/WBBSCiwKcxBP0FbfFwudauqzUoRqYGL/++C+T285Z375l3+ZX/zFX/wJ0e2//tf/ml/+5V/me9/7Hn/5L/9l/tE/+kd/aDf1T//pP+Uf/+N//Pv8y/HyHTc/mRzRAss8sZwWrHoMeUsN0rCaDzps7gMhwCzisxNTztZICnF2u4Fa3YqkZD+NHCy6aeosJ2dIpSC04g6/pkJuQhg+dgm7iVmTdrp2ojYIHUIbG4pju2GkBtrUCW3QaDnIEu491u1g8gVUj98t7huX
guPnDMfw+CnRQtGjAxidFnLQykeR6k7YaM2Lgw9T2ysFW/SGV4cgN7zftSuDV20u9FPxv+uguPtrZ8MFwR9j34d+o3RnT5nQUr0VKbr/DsR81iW+CJKKx2GkQJoiKURicGcEd5H284Xrj7ygoHIYZTjqn7zTDPN42gGq+HOU0Y7JJ44AKkdBHyzHYQ7bmqFjE8YqRbIXr+4R3aUbu1U2Glk6HSUMuyenNhhNjtDBHTRjmjEy3Qp9OC/4/Qj8O+4jGFEA7UioqHUCBYkVmQrpLjOdC4+f7bz5YuPtFyuPb66c73ZiWFHZENnxPPAG5E+o4Xfj/hki30HkLcgXCG+4ZUDdCpSzMm++ezf3ksbz0862Fr7++iPvnz7w/ukDTx8/8rI9sZcLnQ0hk0JjToFlmljme5blzBwfiAG6HE4XHWRCdCHEN6RwByEwSaTZTBh1trPRnjaoFamNXhrTsqMPkZoK8+T2XcROPAWm4skC04dIa5F4Ppwaunv/mVLLOjSS6plYBwJx5LENxm0plT07SansoxPOjXaTqIzOiNvoyYlbBztjVANrDWvOABYqIp0YjJQcyZkmh/2iGC6k6Lf34yhX1XzW69ZNTlwouZH34t6B5QjSHCChHT5/AyJvAzB5ZY7gspgRFWtO/gjmWFSAATtCpxPMi6W7r3/SNPx0DPT/tUXq3/27f8eHDx/4G3/jb9y+9tf+2l/jj/7RP8oPfvADfvVXf5W///f/Pv/5P/9n/u2//bd/4O/5B//gH/CLv/iLt/9+enrihz/8IYew1Y8j3kA7wSCxzDOn08Ld6XSIayiTG8jKsJkPUYkpECOcJmURZZHIHfX2YluHUru7BmTfTLvbV5OmxnI2Ht8IWxJqUaeSmrBX38BRhztUvJNI0XVNU+zOkItOa05DQwTqy17GaaUOjY45bONdwEELfi1+MTn81VrESHSnxLlF0uikONhZA6+LKYwT3HA6N0/1vc3CVKjVXdh76686IUYCb/S/MUYdHwOH6JjuxTGoD5Zb9/mQa52Msntc+Lb3wTxqDHSP2BoxjjwsM0L0RRmDz7iWqERVJpVBb3fz1xQiYSj3DXeXaAFSCo6p90FUHk9Rk18HccYhJOlOA24NsbHg7fArPKjAg7AyoI7WDB2vV3N7fYrswwzXw+VKa6y2sVGGT0IgonSL6GBrdd0w2yFuEHeomc6496NIuR2QmRen1wLVhqBFvDDRiCET5kqYK8ubzPJQePvFzrsvN959Z+PN2yvzvBPCFZEVL1D7uLbygDMDnqX1CHwX1R8A74DvgCx4l3W8oOMzGdeaHdTyStmzD+mfNv7bf/2Gp8s3PF2+5ePLR7b6TGkXVAshVJYJljlyPp04nx45L4/M8TNCMJrsbrprBZGEhjMpvUPSG0JJrDrTeGZvILtDoLmv9C3Trhdq2ZnOM6KBeir04GJ2AqRzwCxhOjN/mGitEO+Cm1FXc7d8c89P6dFZmGWm9+iwfLdbsTos2XIplFrJpXmmVHESjgeMvs6Ngo3XTcMg68snJIU2cvAKSkO1ESKkyQ2M58nRiiT9kyLl5c8Y4wdjuKEIJbsLf9k7eavk/ZDBHGv7leneBgkoHEVqMCEOBFBHoYrm5201RqEyknHYYhNQPG7K2bS9m4dB/l+hSP2rf/Wv+At/4S/wgx/84Pa1v/23//bt8z/+x/843//+9/mzf/bP8uu//uv8wi/8wu/7e+Z5Zp7n3/N144KRnGUz3t6ggTku3J3ueLh/4M2bB2LuxNrIsdFDpk8QJkUnIS2BNMH9WbifjLvJuFPfrPJzJr806ta5PLkDw3pt7Ju7dk9LI0RjmZVtjZQsXJ8abTf21enS/rY2N1adoBWHYyrjZB+F06ykWZlSulmgmKWb20EtlTbysFSG0G/cY1REIvOSaH0hRCVNYdjmtButFXntCFxTKaQp3hiAh3D1yNpqpbPPjlWnzQWPt8WI9xIaxiY/hTH/GjZMbus9upDGoXFaW3Wh325sm1EKbJuN0DU3y0UgpUqMfggQa74Ix0zrtAjnSYkamDVxioklTczTRNLIFP3xrRt9FrdGqqN7vM2hQCLomCfq7P1M7WC7YcWQ3Q8B3myKO9UPBwDheAz3QDv89sQyTQ3TC2oZscReG7kVLv3C1XZ2KUxMJCKdefiDQNdMk0yXCz1cMV0x2bwbsH0ISReQimn1P0pcuyRW/I2NoNNOiA3VzHRXme8rj18Uzm8y3/3hlXdfXnjz7oXT6SNBd+AFh/leO6nWC/sWaVWYpwdC+JKQ/h/A94FH4J4jGuf3v7nHXi7f8vx85eOHC7/xay+8/2bjN/7LE1v9mq19za7v6foE00qKxjwZj3cT9/cLbx/f8PjwXe4evmCO3/NrnmcKT1Q2lIToPZK+y7J8SesLObzH5ANbW9A1YPkD+ekjWQt7eGJ9UtJdpJTG+fGeu7cP3N2fSTEyhZnpFJGwcPpspmtjeZ69M2qGPGeMSs0Fq4K1TtsnekvUml5nqd0Nn2sbUoGWfTbe2g3ybDY0R2M9du1DXO8tlYqOOWNzn7+a6S2joSLaCbEzzcZyFuazz8nDJ6LrYyrVwTPKqpIz5CzkHfats2+N9ZqdHJWH3GUUSxsFqha/x4oXKDuMr3z+ZGOOlsQRjgREjGigo33wVBwncLx0l9DUoxra771yfr/b/7Ii9Ru/8Rv8yq/8yh/aIQH8yT/5JwH4tV/7tT+wSP1Bt2ZuvBlEh/I8jG5qQH4jS6cwUTVBiDRt9OiD9pCEaXKr+9NJOC3GaYIldCw3ytXhnLw3tmsl586+Vs8DqqNQJEjRaeElC3Wv7L1j6zBsRT6hdQ59gxoxmGezBAjBN0MYG2I4zh82OrPuqv7m3cnNdFL9bwXvFqYpjVem+0ykVwYf59Y9qYpv0CqkOTncFcNo940a3H+u6XBBV29vWuho7Rzx8piNTsohRtFPihSCjGwcBjPJu6kxu6muiyo3fZSb3g7IGoagUXDluor5EJlXHVYM3k15JxyIIQx6PmOd+twnRWhJbjMAHZZMIYEkfy0kOaxDg1DtFvR2DKAPHw0++f8biWKQPFSd8EJvVC0jn84orVJ6odhGYaNQPDW2R0IzTH3R+0ndIT6T4nfq2MwcXhbpIP3mSHHQn/3zcV2EStAKqbgW6lw5Pxbu3mTu32TO551l9uBL1Q3YgB0jA8OXzYxalZojKZ5RvQd7g8k9Ih6N8xMT9dug/uigdrrt7PWJdXvh+eWFDx8uvH+fef/NM8UuVFnppx0ZGV4x+iFkngLLnFjmhXm6Y4qPqLxFB7tWaQ67EhE7ue2S3iN6ItKI5hlSWl+QXGg5UoCN4qUzB/R08XU5UADmiTQlUE+6DUsgnoIzAq8NXRsSGiZOOOptp9dILfmm93N02D6Rd5RRpLybqr3e5lHNGiYjWcDBFi8QvQ1XjmH0PNi2B9SLNE8Ujn79xuQfXevZR/9lY/14eGvrI6KoyGtkUTE3gi1+2O7tldxy3IaBxo0kxYC7nSThOM74TqIOf75BOx92AL7ehHFANsIgccjRRP7v7qR+6Zd
+ie985zv8pb/0l/7Q7/uP//E/AvD973////RjXMpHUlPmYEQWx3THyTdGn1WcTjM9TliZEE1epObKdBbSg3B+E5lOwtsH5W6G02zM1slXePnanLL5fufDj3c3T9wdusJgOQfSHFhO0V28i2/iIShlt9GxDAv+wz3CfNOdQvM+OvowFFyfhEYCOuipoF1GAekuXBRuQrugQorBNTvMqCjTVJmmOExii2uh+tjchonpAXPNi1PbQxpR9d2ou+slWm2EGGi1EYN/rdaOtdFx9U5MXuCOjsw9yEZBGM+710qrQm9KMWjWPYfrcJnIo5MarEPDhYcpOGlExfVJyxlqDXSLMJJbY4yk5BlRKXrhSoOpJ92hClUZcKt3UJrMZ5LJuylUsOjmsFJk5FhxxBiN+u5EEcE/2jDrbM3hWOG4N7q6xZb4SiZvhZwze83sbWfvGSudZh4zEdpwWg8FJNPZ6H2j952G2+eI9NHF+QalRxCenwAwGYm7wYjsdK2E88b5bebhXeHz7+/cv818/r1nTvMzc3pB5QnvnC7jYwEYgtSJnO8o2wPL/B1MvwN8F+GeP5hi7poqo1Dsa0p74dvrf+Pbpws//ubCf/9q59uvKl/9aIP0hM4XljkTtbIscD4ppyVwfz5xdz5zd37DefqcU/wOUb4HNAIJ5+C6oatxB5yxdsbqHVIh1MTUE6kaYU/s73+La33iuQBPO3o21h1OH3bOHzdqaZwfTui7eDvAxbvIZBPLFydq32nd0G93ZO10MfYGda/UHGk1UfJEHfObWr1ItV7IdaO1QmlXmm20vlLaRrfiibv4SykhOMlF/VAcDNcrVrdU6t1n6qJ+MJxOynyG6Q7S0glzc1bmoU2zQEPJ5uShbYN1he3a2C6N7VpYr5V9LeS93WzcsE/2lnHAS9ERoCPqSyZHgDrHLPugnvv3xpFwoAEIRtfjUG3EDs3MLekAyT/dPv+/pEj13vmlX/ol/vpf/+vE+PoQv/7rv86/+Tf/hr/4F/8in3/+Ob/6q7/K3/t7f48/9af+FH/iT/yJ/9OPk+vKXmeCJCQIau6l5/5izq6T6Bt9QIjDhYFZmM/Cchd4eAwsi/Bw5ymxS2CwUFxQe3nOfHy/8/7rjZrN1dKDoJFSYpoiyzI7/bkZ28XPeyV3onmH1FFn7I004N47YhWsIlbprWAEumTiwGx9XuSY9W0SNE7dQQMqx/1g73kjHkNERWitUqqiVf00JnYLT/PTlxBT8q4kxYF/G2pC004V12dU9SLYQieEjrV+O23HGMZhYEKDdzU3NlDz66CVQlXx7qx6V6XBsXVVcx0UnpIKozupRh3wY1DAhG11xmCaAnNyg0+LDo9IwO1+hmZr8Cx8cY11ewyn4yS30DZ0RFqIIuKEEyF49IYNccgnLMbb8+vH7MEGMWDQxdUHVc3CcFhP9OKEml4zvXqYYrMILZJN/fkG0OCpsKaVmj3Tiu4dsMiI304yDgZO+tGbI0dzfpV2gnZCaKRT5XxfuH+z8+bzlbvHnWW+EsMVlRXwQEWzDZMMVj3Dqwd6fUD6Z6i8JejnqH5Kkng9QTtj9DgaX2hcqTxztW/Y2wvP+RsueWctG7l6QKj1dhN9nmZlOgUe7hPLHDlNkXlamNKZFM/EcCZyRjgDFWVBmVEyzuT1uIe+Fdolk1el7jOUB6S+Q2qn72+p2xPbdqLlgrwUct9ZL8b1pSISyG8rKc6kKRAnxYKhszC9SUxbo+wVPZl3VKliIdDEKH2i9URuiVIHIlCcOFFbodZ9iItXP3TYSu0b3erohsdm3TsaOgSPczdzGzOP8zg66ubQeoLppEwnYTpBmAyNndcIlXHJmh/+SjHy1tivsK+FfXWIL++FWjzpoDVundRRnOaJcXjwA4SmEXA4jXUzTAVNRgSOwjzmxjrWI8GLfhseh8GJrMTubMD+v7NI/cqv/Aq/+Zu/yd/8m3/zJ74+TRO/8iu/wj//5/+cy+XCD3/4Q/7KX/kr/MN/+A//f3qcXN0ZOoUZUSXKabTJgwEnI6Y9+kk6CvQohFk4nZTzvfLmXlkW5e4Ek3aSOK8f625Q+5J5+rDz4ZuNshu9ODkjxMD9oyJ3kXnxC9yA9eK9bN4rlObUZVFqM8hOZ/aMpQbaECv0VsAC1Qp9vIlgt3j5Y6AiA68OGggS/I1XHV/3/27NLZFqrbdBf+sN0z5OYn4qcqKDCxy9uHjxUdQjMsTp4KIOHQTtHrF9uLOb+c+HwDzP6Pg9TsXl5pZRRpUQUVqB3hohqEOFzbuD3p1Y4j/ox4vDykUHSWHeBrSXAqfJ/36bHV/QT4pUjGPhG0xtOK4PooQEHzjrEHM771Co5udzd7IeUyK74RrcsrxsCC77+Puk3z6a+ucOY6gP19tEy4FeFCvZ/c8KtBYwCUhVehwzxmi4K0ml5ebBdjcLnE+IPtGd7ENwJpi0w8fRT6whdDR1llPldJ+5e7vz+NnK+X5nThdUrohc8bj3HcRjN9zWJ9F7ord7hM8IfI7qZ4g+4pOFYxQO3I5O3kF1Xqj2gY0fc7FvWfuVl/KeS62stXh0Q/MXT8WI0cPvllPk/n5iTok5JqbpxJROxHgm6okgJ4QF54eNImXe+ZnhhIQ1U6+RvIYRiHkP9S1SO+Q31PWB/flEvl6wVAh5Iz0XpueNOEVqbiwPC8vdxCzJc8wmZX5I5Gshb4Fw6sjckKlgQenaqRYpPZJ7pDQvUrm6EL5Uz5Dy+dQ6Yld8tvipuayIoIMViI5DntmYbQ03DauYjMSCSZhOQjoJaRFCGgWOxs1FeaQlNPOiWXbIa/cCtWbynsnZNaQu0mesUYYW8QgmhfNJuF+8QGnyO8HdWWwc8kJwJu0UHPILKogPptz2aeAMoQuh4axng/i/0xbpz//5P/8JhfX19sMf/vD3uE38/3PbygtTVlQSvXdCnNyGJF+HlsS9spCGRg8lJMJ8J9w/KA+Pwpd3rtRe4sFNdqJC3QrXp50PX698/TsXvvqvOyUbvSohJaY58fYzpT8k5vnM+cHtV8qOw2S9Ua6dnoVeCrkZOXePYaCTYnGqdBP6vvnJDO9K0jRhMo1ojeAdDkIK8cZeTHEw2jSiIYzW2zf3vfjrsG2BXXdqrZgOBljofgqPQhquFT5LstFJebhhG1Yvqh1B6dG7qD6sU4Dbc5iXxcPnUrhpsA5nixACilC00opgvVFLJUbv8GLxza4OjM0wJ2kc10/zTiQmnzkaIwKeSDs7U1FjIybXBoV4CBrHULqP+dNQzqdlkCXimCOYoNm7nkICi1gPt/fmyOA6jH8bRrXhGG3mceN06ogcFyDUiNQZ6omyuRi6bkbPgV6SO2jgTieize9prN5YyLVSmttioceBYnSRS2CaIzENJmkzj25QEO1McyXMhfPjxuPnG59998r9/RPzvBHkI3Ad9wuu/d8o5nT/skWHzsp3UH5IiN9Dww8Qucd9+PRGMR+LBWSl88LKr/HCV3y03+Zpf2HbCx/3zCULl4p3HXjeWdBGCo3zWTnfRx7vF1L0w+YyPTClB1K8J+g9KncIM1hE5E
SwhcDuNk9VyFtlfd7JH5Tt5Y52nelr8piUnGjrl5TLE9vHR17aE5WMfX0lnDrpzgk9l+9shBS4/+zE+d3CchfQJMzvJkp2n8T04AQiXTZkLVgOZDLFlK0F8hBq72POU2uhtcOx3jtV3D+EYxgj5hlwne5sQXAmXyijSHVq27ERkaJJiLMw38F0J0z3kE5GnDqi9XaA6AjNhNLVs+6u3jVenzeuLyvbdWe/7s6wdZT3tl6mwRo8n+H+Tri/Ex7OMmb4fsC1AH001hZ8Ni5AEO+QFC9OnsUqzvTt4jrUIsQM0o2+/XT7/M+0d19p2e91Q1CSbdRS2PfNM2DKTmllOA63WyeRZmGe4DQL52jMASIee1FaY98q27WyXup4cyuXl0rNfoJOkyfq1mJukWMB0ejMunliWhrzaXJndAyP4Oge9CUO0dyG37jZa6e6Yah6jIa1jo2TkYpPJUUc6ktxIoaRY6PhJvb91C5NFVobcy7rftof0RpHpzH47A4ZHUXh+DqvdOvXCA5uEOTvZua8gkBeaMTMady333sM1Y9vHCTZTzrF2/d0e/WdG/TvUtzfL+6BkpWSnOLtzMUxtxmu0keRsoir3M07LRkUdh2QhLTh3jyiSBgw3y31eLw+R3xKH44S/vz8NT2Ev9LFu1g6NEMafgBpRwq03oIHbZi/YiP3R/1wQOhgnp3VxuzPxmvuVjw+Y1M9YEx/kn10JyF00lSJS2W5q5zuCsu5kGImhB3IiDhBwt3Mhy1WE2oTWp2gn1Ae0fAGkTeInhkYz3j//XH9mTU8mPCFXd6z8S1X+5rVdnZrZOsM4rRTPyz4T1q92W/5aynQB6PFEpgzdgVPD3YfO/y/JTqUOnZIa/oKqZYhqK+JXiesTVifoU1Yn+gl0oYjzIG2rE+ZOAcuHzbXFCYI00ISJc5erOLkMoUwmXcuyZDQHSpG6BaGqwO32Wo9Ijl64zXBuI6C85OLx2w43/d2I5C3fpAmRoaX9FdWb4SQnLQlweeTtwGq+aHpYKu25rPkWgq15ptjicOJwxLt01mUfjJXGtfZbb/o5gVnrDGCOTwVxjU6vs94BSFc8O6hpr06lN+zr++efzrmxM90kdrzC3tWaErWQlfIe+GyXvnw4T0fnj7w9PJMD1eYNqbYmJbO3b3w8ABv7uFN6iQxCpW1FF5y4b9/deWbH6386Leu/PhHG9/8eOfDN5VWABOWs5/QPSzMyHtgLtPoJqrnu9y7sK2mQJFCz9CoRIb5UmiAeitf86Cv7h6xLgGrkwtm8W4qhEDSSNTIlBaWeWZKE1Nc/N9i4Ag7TcUdHDB3PnBaMXiIX8PqsDYpBkNQelj7tDE3q7XTi2umrDan3LZ2m0kNdgSH43oISmhhOFr46d6V99Xx71opNQ/7FS+e3m2NlNnu1PfenCByGK7m5tZV2xpBEiaRu7OHAzpbKru+CHfIPhab4oWoD8p5GMVpWgaBQn1wKx1Kd0GVtTiKiBccf35jEN68M1QaXRtVG4rbFJl0TDrNHILTuqN1QmqiZmi1ue/foPFTABuO0oNKHnqF4CyuOlJ3rXsndUvp+pRyBdgQjtILQY10qpzeriz3mc++t/H2i43HNysxXVHZ8A7K2Xxu6ePO3bkE9j1g5S1Bv2BKPyClP0KI30HjHd5F/e7VVzE2Kj8my9c88Zu88GNe+B2udAqQQySHQAmRKo2CUrpi+UpfL1xeNkzdl2+ePNIlWCLFGWsLmEN9r2zCCWFGWQgYagnpJ6gnrCy04j57JUdKjtQcoU8gCyGcUJkRS96dZzA61w87Bkx3Tz6r2QoonO4T93MgJCUuynxWyhnmew/ejDtorEgZlJnugtncbFw7FetHUSp8cuq5vYJ2Y8E5a8ElHm0ceA4vzXrLlhJp3lVPnTgbcbYBqw3B/eDVFYS9iQe85krN3S2a8kop63AtKZTmkDpjrYQBh8foxcrM12He7FVwnDqSRtzOMLomDTLFuEjMcGi/u8wkF5dqrNfuBtz7SCr4eciT8kjmFSuBjFO/9y1zuVz59v17Pjx/4MPHJ3TeiewkqWjqzCdYJjiHzoLDb1vbub7sfPsx89u/8cI3v7Px49/aeP/jwvOHzvVqWMO1SbW559zeWdfOy7MRJnHBXJtBhGkxaJ0YhGAbLXZ6KERrbhApPrOR3tDqZla9NiwMka8xrIDSKEaJZVoIITLH2TupEEmaiEGZYiCmYzbjjMFSlD0YLfTRUTnGbUNr1a3Ro58+j7DDmn2xtGbUXF51WkcBaQdTsTujTwM1Z2Skh/Yx2zryq0otDp+2Rt4KtXTy3jxsrQxodbAJWxsmrg03bW2CmJ/Eaw6UENAQyJuwJxtiyZ1a14Hd+4pzayCh6yBRAEFd1xVvzjM+j+odKGBF6Tm6BqaOU22vP3G37iLaZpWqFaU5LCedJs1FmRhSN7QGpHqRLa15om7PY07hx03puKGrVqciSsXaKFCfmAUeHdXBwPTuraGtYYclQKjEuXD3xll97750l/M07ajkQaVyXZUNyKmbUNpMyX5X+5ygnxPTF2h8g4Q73PVcXjU0ZnR2ql3Y7Vs2+R0yX3ORb9l4oukVix4EyQR4Y0TXThPIvVPyyt6vhG+vrHujNeV0SpyWSjSHNmuN7s9nh6OFcahu3ALaCDozh0ey3lNlRvqEtSFYLcd744y4tESWkNA+UXXC1CA2eunka+X5q9Ud7Iux3CXEjNODO0qoGHFisOqE+WTUrRNjp45Ow8whtm5C5yDBHCaNrzPlA8mAo/vw3d/f3/G+3/SIni/1eus+uggjuDLYSFq2W7vTMao1innEiMe6NMzy6/1IPeivCMoxz03R10c3Ly66uuj+6LgOAsUUgrvyRDyWR1zzeZTgXDu1Cfvmwv1SjMt1aCOzr7vy8xDVUftOKYnaFekVK5Vt3Xl5vvLx6SMfn555fn4htcIyVYdVojEt4sr2YMziVkOtZa6XnY/fbHz13y588zs73/z3nY/fVi5PnW0DbOhuqp+WcnZh7/XSmRbGScYFuWkC6ZUWDOkTPVSaBKI5tBPMaOKtuFhFTN0CpTYsulecmpAkMceZZVo4n85emMJEEJ8RRHUrILcIGh2DRswq2zB01eOk1Su9DisdMYJ1rCoWD1aR3OyKnKk0MqZK8S6n90GL9a7AdRNKGeQNEA7/vwOuaAe00NvIZ+pO5S9+r590GUcybx/mmr3BCMqmFi9QGoPnT2WjlOrhf211/7reBnx56MkGDBlGKrA6C8lPhXJjAVIFyjAZbkMI3OugErfXIXZzl4cm3k01GrVVuox0VcZeUbeR66TkLtTuibOtZWovr0XKQKlorzQrHAFYR/DdIbB2hM0GVHZYDb2mMnvWdyVMO+fHzP27nYfPMsuSSTEjXoURGborq6OrVkpLlHqi5Dum8BZ4R0jv0HCP6omD0XcUKOg028n2zLV9xUW+YpdvWMNHMi80NqdYpuCT9NQgQtcyGHGVXq9ugf1xZcud1gP3dwvl3DklYZ6VVoPPBm9dlAI+i3TYTwhyIoV7otwRSUhX1y0N0WytTp6SCGmOz
CmhNlEk0qXS1bO76tp4+Xa/udg/fnkiJKWP4FERI05Cmp3+PS2dPBtxFAsR87V/g4rHCeQmrrWbNObmXvIJnN7H++t2Z3a7/2TX5Z2fs1n7rVDduBKfEFkqDj2WamP91VGgit9viQf+E+4M8wrzHbPcnP3f991uNkmahDArp5MxTzAvQkRv+s4D5tuA0oxt7+TVKNm4rp1Svfj17gfin+b2M12kct7Yo9D2TKuB/Rq4XjaePr7wo6++4un5A998+y3n1pGToDEyLYH7O+Vu6pylE6nUXli3C998deG//pcrv/b/euKbHxV+9F92Pn7duXw08n6cwBnUY2NbG5eXxodvPWDtnJXTXSTEieU0e5T4HFGutNRpIRPpqLlgtGaDzcWXiiB1d4Ve78jIl0lh4pTOnE93vLl/JMVE1Imb4zWdGLwznBaIyYgtYyJcNyMcDMK6U0uh5J3afTFoUmcMDRGumVDqKBTNKNVPXKVkT6ZtTnrw7srLxzEohYMO7fdm7QZHHRtuHdHSJRv73n3QPGyRSvUC2buMSHLvXGU40+ccMXXbmm0PTBPs+8a2X9j3J+qy0vQ8Tp7+nEYCAoIXJw2+dx6oWbExs/qkkzrc0ltzdlYb3U9rhdr9m1WqC3bJ1BaGpx8YzVloLXjcdsuUFlx+YJlCoY4CcWQc2/DfwzbEdpRXyvHNiRovENYPqLTQWkV6xiw5lJd2wunC2+9uvPm8cH9/JegVYdDNxWdSUOgU9uaC3evljm39nLq/ZTr9H6h8D+S7wAOvtkd+c/H8zof82zztP+Lr639hj79FSx+x+6+wsGJ6hbCgkginCTk3OHf6lKla2NpK3jdqWXneN2Iyzu8Tb98svHlTWBKkpOQ90VrCLHK4ybx2UgtKJXAicYfahLRA2ztlrVyvK9fthS2/QCikAOclkfRM00bVjWo+n1rXRt0769OV/FLZnir3ny0IxpsvZygVM0iz0s/K3ZtAuTZ6NualULMR1YghUoMhGly0Dz6/HCcXFRlpuyMV3HxDB0a8BzcU4vcjnTkaMNCLycYd3N7BDzDNo7HJw44r5xFi2gvG5jl2hx9kr7eyBq9wnw5LsVrhcoFtZaxPF95bMOLJeJyMc/S5/nwa0h43IMQEXsbavjxDvri+1C3QRmc2Rgs/ze1nuki9zj2glca2Cdu2sm6rkyfyTim7Qzqiw9/O3NY+GCKdapW9Va5r5uU58/w+8/GbzPO3lfW5kzd/UW+OBfrKFDNzEeqeG3lvhNiYlhFIqIqEhFodSrghQsX9udzTzofnaUSap2qk6M7f8TDB1UAMiRQm5mn5ySI1+NAhGjF2P9lpc/+5Xmk1U/JG3v1eigtL+8i0UZTQHU45otLGwQ9n+xUXBZfsvmO1UXIdWHO7mbXenBns2EyPU6Gr5m0QLQ44r2S7qd/bwMV7OxzGZRS1I+ztEM8e5IPhEF372Kx3j+Nuu0N+3Zw9fiAgjC1uEFb02PU/nbEcz9sOM9yO8/iOuPDD1aHdyC4mn979lNxliH6l+Pstiqkz4tAMOkxf5YDy+iB9NIgZCRmJdVANzHkUcRxzD+1LGyGVlum6ozEQdCGddub7zHzamZZC0B1RJ0qYHRCfQ4nNHHYpJVDrgtg9Qd8Q41tCeEBkQcSjMOAVhsplI7crL9cPPG8feLp8pMwvWL8Szy5Qvb3WMl6CCJIMCxULlU6m9kyuGbFM8ERylqmxzEarQuuKMzn946e0nONqq727L962j6RZ5XLNvKw7l/XKun8gtydIhRiFc5rp0x09QI2FZonSI/ZhZV8r+zXTdvv/kvcvodKta54X+nuv4xIxY16+21p7584yj+Q5RzigoFCINhQLtOx46yTYEAULhBLEhiAoYiEIakPLhoIdFbSrYOMUiDbs1Cm0oFp6jlaZZebOdflu8xIR4/JeT+N5R8y51t6ZuXZmWukmxyb2XN/3zRkzYsQY7/M+/+d/IcyJdYqskyPMEZWTbDgUIgPohGFnvOwnratYBy6rpoe0LR5DNeG7zIpU67Z1Yzep5tgqOrN2LVw2Lt89FG2u2nRSzklum6AmrUip0pCc5gxfpRDU5kiiTBH7LiN/3n5L4wZxSZXOUqBCaA4SWt6+pBUIGcnoil0raq2wVkoQlwnRT8nvn9YqCQSrRNanVdjNW3L1pof8IccvdZHKUdJb4ypagHkqnKeZ0/nMukzEMFPyKgXKW4ZdZdxVRi9MqEziVFbO68qHTzPvv5n59qcLH38aefyYOT9IemzJz/oB14HvFLaHqqVjWENkmleq1nSjRRmLx1BVR6WQqydVTyoOY0tziqjYDlwvEdIuCNZrrLCJ+kHTdRbnPJ1vcF+/wzuPNf4yQwIpTNYFtA2gJRZ+XSem05HH+3uOTyfmZb6k7m6Sb1c01WkpnI0WZ7Swd1SBGgTmW5e1pZQm0rYbii/iBqjP7uINpvj+jvC5UAk+LZ2UtP9CGmk3VdXQnNS53EjCDtJZiV1PzJJ1FSdidMRkSelIMjtSKkJOaXop/aIbeenED/L51UbqQxfQEZR0GqXZE6EjyiQpHjbLWFoFsAvViPCztnCfqtvsgbXtKmWZN7niVKCaIIzPssXmymenVcZ2K9oktFtwCGW5L4qiDdUZlBFH6ZBWQpqJ9YS2FuUj/aDZv4vcfLGwuz4zjAGtTwjoIl1UbYy+mDMhV04nS84eyg3WvcP6L9ntfox1N2g18HJpKCUT08rj6SPn9YGvH36L4/qRz9NHuDqihpn9VcZphbEWXWVLbpzCdKD6Qu0SxUlhzayEtJLXBaXFwHnoEuNQKVmjikErh1KN2XcpUppMIRKZ8ol5WTh9jrz/Fp6+qbz/9sT5PPP0eCSkJ6peGPYTwx666wP2SqH8Sun3pDoTy8T73/rM8WFmOT1SayUuhfkp4AbN0yeH1QWjCkVlCVEcDXZUuFFcH3JWDKtCOUuXHH4chBSQEjk9w+TPZKN2j5St+IsD+aYfK63Ay/e1d90cILpeMYyaYa/pd9LBGCcZW4V4oWBkTGPWbay9ivYV40VDd0ndfFGcYpTfN5vnQrUGuX9KlbTrlKF60USVqRJPldVXnC4or6mzzB1Lhem+EM+F6ViIUyGv4lZDrRdm8Z+IIiW+WVuYVxVH39AolkViDbQpOA/9UKRAjdCZSlWZici8rjydV775duXDN4H7byJPnwXiW1qBUoqLNb4bFGbQmN4iRKFCyIE1BnQwhNhjnLgO5CKW/zE6UnTE4CSeWyMXDA2fNpouNg2PVmir6XoJ8/NeiBOd7+hdh/ce53paVCYyTI2yiJlIqpl5nTlNZ+4fHvj46TNPj0fWZbnsiG3LUFJaOihjdIuol98tuyvFsgRqzYQ1sMyZZc6ERbKgQnguUpfZz4v25Dvl4AXrpxTJXQqhiI9YFo9AgcfVhTV4Sc3dGO+1zahSbXOsSEwzKRtS0cT0RNR7kgtENKoacavY/MZqY9BuBAAQMaKpsru0CcxCUas8WrekdEYb6ZKNSxgVMSqg7CJ3qxOWE0b8C3UFoyJK60uKRc4Za1aKDZCDGMI2SrLR
4r7huiS/xweoAV0TKUt3Vo3BWEAVYpQZXCqOcbT4LrO/1ezfJHavVlx/ks0KZ+noCPKoiVwzSyhMS+V0NFB7hu6Grn9D57/A+TuM3oESSKpWSDkwrWc+P37g/f3vcDx/5uPpa5b0yDkecXrF6UQpghOZqqnKoarBKi1uCF1B+Qw+U41sAHKJrFEsB5TWwiYtFa0N2liMsa3reL6S5DoR9OO8PvF0zLz/5lu+/jrw8FXiw7ePzNPC8XRCmYDtCqN3DNeGuy8PjHcjZkjo8YZYF0I5o4rH90eOD0LRLjURl8RyXDl9FljZuYopmWqqRMB1GttMobtYGfcG23ty9vR5kM4jFGKI5FSI8QVDtAWI5sv8SdCG7WbaaODbTaSQjs152O00u71mtzcMo6brFdqKjUNWEt1TKSQMmTbrakJw55FNsZdUgY1sUWnzobTdg1wYec49O7YkmmzQIlHwK6QZ1mPB1CJzR6coSmjn87GSlipwYVCU2IS8VUxoFUi79wOOX+oilfOzf1rOjSkW5ULYWE9GC3zmO+g7md04LbHcoWQe18jDOfL5U+DxU+Tpc2I6VpZJDFA36MI5SeG1vRhQ6k5TjaKoeklMjcnK3CY7SjXN5NWQkiUm+ZqLoVDFoaEptJVR5KSwrj4P9Z3GNdsha00Tzjqc9XgnAUgyeRE4oagoieYls4aVeZ55Op14fHji4eFIXFfByrUI9uTCNxgjUBpKipR1ZuMioXUAGty3FtY5sy7Swa6B7xapyw5Qf1dX1Ybulxtigw6ikE9S3m6WRnjYokSo36l0245PHqV5E66kIuc05TMpT+QUycqRtufbdqdViQB0I0soLqkiGElNxqyS46RCK1IZ9DakFvdpozJaR5QNElJnLRjbWFbN4LgIEUYXJdo0kylmxZoV8YIJDX4LGJMxpmB9aYVQhtuqZukylaYo0xaLQspB4E0cxne4Efoby3CdGA4B4yaUDoizeWodlNDNc8mEUJjnynQ2GOUY/BXW3tB1d1grUF8Db4BKyjKv/fj4nm8/fs3j8RMP80diPRPrxDhGVCfCb6o46tUWBKibG4hytREoMtVkipJQxxAjCtViWZprhtYX1qiEAW6t8DZ3kSK1hInjeeLj5zPffpx4+Ljw8fMj8zxzns/4HnplwL2m3+25udtxeGdwu4rZrcQ6E8rE6TFQiqIfP7PMRcg9ayZMMD8t1EFRe0XnpBgoL675xqsLmaIbDDZbcvX40pEyhLWwLpYUC1qn5gATG6MO+WyqBIrWZvlw8eVU372nnAfvFf0gCEs/GHyvcZ1YfGFKYxQ2j882HS3tyTavynapXsYVL+8vEbbT0A4pWjE+jzeaSkPWBTQ6KcoKaXnWTGVL043J+pmDIkRFSYqaNS5lXJW9naL+CSlSRbWhnjDtwhqEGBCFZqlUxnlJht3tKoe+cugqnaqsKXEKK19/NfPxm4Wf/m8z3/wfgU9fSZFaF/mwXMOA/Qh+1PiDwx88dnDQa7KthBJY4gqrYg0J60ojAlhS8CyLIy6OsHisyyiryFri6J2vmE5u8F3e5jDCFNRaY5ww2pTZblgpTlrJfEvCMDKpVOa0MoWJz4+PfPh8zzfffOJ3vnrk/vOJkhPWCCNnHBVdp3BOo7TcbAqDNhrf2SYcVZyfZKdfUyKulXWuzJPY968vfLd0u+AvBbbZNG0LjNZbWCBCushJeAVZXZwmNlhCfc+1XQ5533VLwc3yHDHJY40L0/IeiqM3n2SHag2qOqoR+HIrclWoVKArsVZJDTWBbCeyOVH0JNlOJlBNQrmC6SqugCtSpKxOWLdimvO70Y2m3PKADEHIMTWjsmhddJlReUXXgFVLm2WtGJPQRqIXtClYlzFlERZgEHZjxqCt7JHXdabrKqmC3SuG25XbH0cOt4n9dcC5M1qJ7zdI7llRiVwr55g4nisPn+H42dG5kcP+DniL0u9AjWwRHBKLnnmaP/PN/Vf8r3/z/8tv/87/zv3TJ0I+oWzG+MxNr8hecUgvPBWbtZRRCuMyZqjooaJ6kWEkAqGsrHGFqlDKkGKmFHBWkAPnPdYIKeW793xkTQuPx898/HzP73z1Lf/7b33g40+PfP7wILHtaeHqpudgd5hxx9WrG370q2948+WefrRYF4llYU1n5oeMqo6fXn2k5ExYV5ZTEBufbyPh2jDsNepWHPbVTmFG1SC/dj8qR6Wjqp7CQIqKZa7M50xYC1oFYoyAIsUmBWkwYMliC/bS2PUly85aQXD6HvYHy/5gGdtr6sdtYysQ6Hb3pOIuyIQyWja8vcL2Cte1YrXZhbXjYvDSilXKAvFprdAtxbx6gxot9IbkZP0hbkJERbH2YrUcS5aJri4ULfEyKhVUzbjaRPx/XPHxf0sPraiaNt6WFloG9QVthbDaKU3XSxyHsxXbhtYpiKPE46fIw8etgxK6ZNlcCtocynpwvcaNBrdz2L3DDR7dQgOFdlnFsSImQkiEJZGW3Np+iEFo1DFarKmEKJELZtuBqxaZUXVTaRcKSeZLccHMFkuHswHnQitSuu2QV5byyDk8MoUHPn088vnziYfHmaenwPksN4azAArnJKwwZyNdFBqtLUaLaa42UiC7LhP7Qtcn+lDJUZTiqZmibsfmhiBftyHx1lHp7xSplAvGRJSW6AKVomhDLnYxqi1czdes/azEkgjlfXuuTexYiiaXmVTOpPJEKh5dLKqMGDQFSQlWVKyWHXHVmTUlYi7EupDqmaSeC5SyBeMq1mt858hVBNFaOaH92422Wy8DZtvcng0ZVcWTreYCNWFywpa2QCmJaihKTHu1KQL/GrF4UimjSobYqurm5ZbFOLhUcUy3PuAHzbh39H3C2YjWCaWam0SV+aOE8Mkge120pCFHj9U9igFFh9pcJbZzWwopR07TkePpkYfHz9w/ikA+11WkHB0Mi8Wthlw0pW40cZkjabXB16olAjTHjnavJFGYU0xp95uYHTvvLtfhdtTWHyQisa6s68wynzmfj5zOj5zOR5b11HzuEsZ1+F4zjh373Y6rq2uuhj19Z9E2ELJFA173WOVFAtKiZFIoqAXWuWD7gu0NBXFJN1qJ96VXGC9uF9puXUgLKSwbQ3TLkEqXh+gBW4T8FgBYn9ebyya1vuisGjPV2O3RNFJGNnQCkCvRPaJbxIdq8Tb1MpuV0aloBa15fu6qn3/XxWnCiL2bNho3evTgUKPH3Q7Y3tJdeXxvcJ0FJ51W1JBqIdXCsgRSzKxFMuRKbjqxoqV7RIglP+T4pS5SxSqylayjpDKxRjIRdBI6NprBOa6uFeNe451QsnNOzE0T9fXfnPnw1cqH34k8fcosZ1mIN2y4G6AbFMO1xV05+lc9/U2PH72YkRYNSZF0RZUsc5yiIRmJvYiR+QQ5aFJwLEZ8uo5eETrJevGdmIZq56nVCJGgRmpZiOGRGBMnJj5zQmHR2gksVpXcuCWw5iOn5TPz+sQ3337D/f0jX3115OPHwPmUpYvq5JK11qK0JeWOUi1KOazxeGcZ+g7rhJa+XheMrqxTxvvC0AvGnJIIBWs
rJrQOSi70Z7hv+6o3bxWE4besgXWV3eUyL+RmbZVT0xA1myKJ/2iwjzFoazHOygxto91ujtPlSCw9a/4atMSCp/oKpTxae+FFpAplBZVAr4QgUQpLmlnrxMoj2T5R3YTuC7YquuzY1T129TgfUXi06jHaYZQWFma7+fVmDUOjg1YpFrpkVImoItOCgizWhQsJUIxxdblo2kpMlCpEl5QTMYBWRTKMikJpQz8u7A6V61vNVVcYbMI0Fp9SUqQKwqRbAjwdLaeTZTp3pHRF8QeMuUbpoZEUno+UE2tY+Hj/nm8/fsM377/h24/veXp6AArWarrB4l/1mJ0iJCM7eLqLDa2hNv9JdSG4otQl0TjFBAWSSTJ7sZZhGBiHkbHvcda2vDRZhiuBUGfmcuI0PXI8PfL0dM/j0z1PxxMhRkyLoDkc9tzeXvP6zRtev37L27svuRt2eKepTEzVUXTFZI9OlhIUaS7ESZw/SgHdg+ksprfkqjBaglJNrzCDwg1NZxUgB5khLstMDDBPlfMpNR2l+GeGsBLWVWQEKTUyxYtz/iKcczs2lqpWoE0VyNlm6cC1zHBrFUJJQZPqJh4X5IDSTIoRGYa30HlF56VY1eZ1aTbHiQYLOq/xvcN4x3Bzjd/v8Fd7dq9u6Iae3fWevveygTOZSOJcFuY4s6aVp6cT67xyUhNrKcRcqMqiEKJIFgcwNh/D3+v4pS5SqlPgNcVIWxlIJJWptgiEpQ39vmN/XRl3YG0FVVhz4HQO3H9Y+PDbK+9/Gnj4Jl/0UEqLJVU3QLczdKOmv/b4g2d4PdDfDbjRQVLUCGUWpkosmXkJ5ABlVuSQhRp/Li10zArtPYm7eT9IAOAeg68a6z0VS1EiVM6pENZCXidKtITJNap2EzNURamig5jjmXk5soaJT58/cTqd+fThzONDZF2qpAIXgzKWruuxzpHLSCkeaofWA8ZanBvwncN5y9VeuoO8Vsa+Mu8q66IueLVgfIJ5q8sfn2dRwIuCJZ9ZTJllDSzzTAiB4+lMWGeWRbPUiVozVH0hdWjz7DqvrcF6STA1poKSzjnlRKonUrEs+XcoeiVyajqhAaV2qFIkwydPVB1Bza0oJmIJxLqS9JlkzxS3oLuCwzAwUI2iCwnfRWqxUC0aj1Ytj6sFLmqD0MuhZYDR/NcKuiR0s4ySeBAj1HWtUapgdGQzPisxk0mEFETbFoNEcChLjoFa5Jx0/cowwGgtnalotekHpICnmog18nBamGfFw73mfDQsc48qV8ABY/ZoLTNOEPp/ronz/MjT+YFvPnzNtx++5sOn93z+fM/x9CROKM7QB8/ubPGzISZNLhaqaxuWihagUrrhrd3c3BWaIJzmL4dSWGMY+56xH+h9jzW2XUcV8QlcCWViziem5Ynz9MTp/MR0PrPMMwqDc579fuTV3R1v37zhzd077q7esvdvpGsC6axrRpWZulryrIlTIcyZdYpi8ForelD4fcWFSsZSNWhvhIbe4LMUAVNJJRJCYToV1qVyPhXOx8S6ZpY5tA6qZURtllffO2qbB23aKtgYrwBVpApKoXW+xPZQtLjVILZcudK6NkhpS/htZH6l8FbRe9UcziE5eX7f7JC6QeO8JH0P+x2+Hzi8fsvucMvu+o7bt18wjDsON9f0fUfnPdEIfPsQ7zmFJ87riY8fPnI+TnwCTqWwVokMKTVRAs2sWvNDML9f6iJVZb2gaIlLyAiEgpbcIOPEZXkYC11fmzapkEpiWSKnp8jjx8TTx8zpQYZ9KXGJGL8ozBvM56+czKSuPXbnKCuUtV786moprDFRquxd8iqu4WFp+o8kzKVSFMaUS6vvOvEi8XVzfqiS5JkLy5pZzjPrpDg9yk0RwwYRiDNCzpE5TKzrxBpWnp6OzPPC02NgmkpzOhbIzAVDTI6UPbn0lOqpdQBGlPIYM2CNxzlH3ylUgXUn2gjnIHSqsZe4YATaXOh932X1tWMzn6QKNdf5FWMcbl1JWXZ6KQfUugqBYoP6VNu9tjTeLarCbINf5DyIK8RMKpZQPlMzZBXFJVyNiOOCRKPUegQVqMzikN/8A1NJJLWQ1UwxK7iKrhpfncwnXEtnzaq5pFuUagXKaOmkLmzpzRmiolpgpSoFXZqhp1aN7CLnUGnxBBQGY20JsLKopbSRgZpJZxHDUWMSzie6TtOZiFUb3aG0EiWQWsiZ8xw5nxXnU8eyaGL0ODOCGtFmQCnLJbusiu/cvJ45ngXme3h84On4yOl04nyeMFicc9SqWJdMCJVUNKXoNo9Sbfden7VBWsnEXD8zPWsL2ystX8gYg/eeznu8cZjLXLM2GD+R60rMC8s6s6wTyyLi4BBWOieOLOMwcrg6cH245vrqmv1wTW+vsNpLJAwRVT0US01aZn+hktZMChm1CmEmrYoUFClJ8Ua1iBknxAmJr6gS7lczMRXWpbDMhXnKLHNiXcWqTeZPuUHbv8t69gLqM6ax6Z6Z6212K/rObXarGsxYqqZg5bopWXSFm4Fv3YqUxGh417qpHlyrEX0v2qt+Z+h6Rzd27A87+mHH3btXHG7ecLh9y+t3v8I47rm+uaPvOnrvCXphLROfw8Dj0nNcOkwKPGlFPk2oJWBiRlspnqFZcqnf41y8PH6pi1TUImDKupB1paiKchVnYdw7ukFx/aZyeBXZXUW0TRQiU5h5elz4/M3Kh9+KfPhp4vG97GJKESq46RT9tWG4cfRXjvHLAX/rGX5loHvdY3aWeMqkswjk0qPsYst5wcTIvATZMSRIoRWkoohRY2xlmQ39AP0AqWr6nSYr2ixBAhdjrJweK08PidNj5tP7yLJk5ilJDkze8O4sPnar+HSta25sx+dAM9EiaZS2dJ0D7TlPPcZ2dP3AMI8Y0xGWHVp5jPEohP3X91IonIPo1YWyuhWpy0xKccErnvVSWxdVmwg4oc0KWIz2LIuk/hq1IEN7iQYxWmONkZA/o7F+C/1rwYZaqOhryOglY+09qcxUs2LLZ2zeU7kHdqBuZQ6lMokjpS7EcuZZQNnczHNmSZGUC8lk8AqrOwbb4zN4J0Pu0rwFQWJbjHnu9FBcBLPP8d/intGkUUIrR7zeamM2kgslJ9Y0s+aFJa0sayCFlbSu9L3Aq9YXfJ8Z9oXdIbLbcwm6lAopRayQmXPiaQl8/Bg4HTWnB0UtPZQDY/+WrnuD89cYszH6KrEETvM93374iq/ff8Xv/PS3+fr913x8/5H7z/dM04xTHu87SoD5nBgWQQdycUD/XKCoaCxGWWxnsL08jJeiDnK91AJGS4Eaxx3DsMPaAa0dW3cnKrGVNU6clxOnpyeO7THNJ0IIjMOOYex59eqOH335JT/68Zd88cWPuLl+hTcDqvVR4qRhxXorVMpayUshLYU4y7yw5krqFPmQyavMUVCgrcZ4g+0VfifmwdpLhxBD5XzMzFPh9JSYZ7EAy/mHLcbbUauQk2pBGHON9KOq5NAZvenrVJuByV2zdVI1yeZYLMaERaerwH19B/u9Yr1RvHstz6sN9DuL6wzj1cgw7tntD9zcvWW3O/
5nZvt5pJG1z3I5RmpBfplVpdj4XXtUjNCAuOBU/D6oQQEWMfqsM3YyIagLl2LHK2n3HBIGMXXddNdV1TbqSlsZwa8wlOJzGyg/aYmT5jU2xxFt8h8G70SycxmU2VdWe5rBshSy0w55Z1Vf4cKG/9mpPL814/fw8zs991Ppz9nNUgmI6WrFDgmqyL2vVrTDplyOYU05o9TA0tqNYOv2knQ9j8zTuTLxjXw2A+hyER0mxmfdaeNnCiZ/a076MT930vX666KL06FT/q+FIXqTDY0B7xNHUsi0f7hYNYrMB2p2f4d3U0KIsSZ5sJldzOtjNx8mz2kd29EsbG/FgNgmnCZrtn2m159vIFLz98ybMXz/gTf+qbfOPrX+Wjrz/n5n4iTsLr8pa38zu++/Axv/Pd3+U7H3/CZw8HnhYly8b87bQgLfeV03Y0wQvTNrD7cMPNhztuPtyy/2Bi+2xAYoKgLCkzL4VUFHWeMA5s7nbWxjuHmywyPTpbOKW6bvBYrZPCFnw3eNwQiBtvj60nbAS3AdmoiWVjPQcFKh0WEki3grRGe+HM51BswVaENit5KabjKtlcMZ4aj2+NxffutTH5lrnPiQXLJdpGpruJ7fMN25dbphcDw30k3DncHtym2Q52wOyMwApStY6oVWMioXoW7YoTtCrNNeoCWY2gkEs3ySyCWxSXhXAUI97NQlsM0qvF4LXcIHf8HVVCbYRcGU4JVyGWTPCZEBLDMBNjZbdd2IyFzVi4GTNbVxlcIWhB5H0xb6v2u1bKLxi0uOY3uRhwcUBKhVaNxbUYszMvkBcoiy2Mqg7NnuqEQuWk4FEe1YrFzjU2Xtl4ZT+axo2pGwKPyrAM+N3AwI7InkG37N0rtu6WLfeMDHgRMjMJ85vrvEKbZKg/s+LM42/ByxHvAkGOlifG1ggT2nVOVdFiNBLUbKWsS1uobaZpgpb6otkuU7ociNkz1pHsAzp67u43DBMMW4N1xQu3zxzjBjY7ZTlWSs7Mp8zhsfDwWnl6gONRyBqoztOcR8cCG8HvlbAX4t7hNuAmIEJzStVGzoWcOEdanE6NUgK1BZRuw6Ry7nJ6kF3fPF2RINZC5Ve8viMAoYs2Q6fssQ59VtOBLq4FK1CnYMIksyyhaTvHa/inBaQRJzg+OXwsnI6eVgO1RbyLoJBqITUL4CxNe/q3aaXNK6XPmmlGxuoifyPtdKivo5DrvGuNTuoXDHReyI+LRDqv8z9ZWfjf6xjHwDCZ3Txi0RW1cklh7XBfrRBSh/2aUX1L95Squc9AxdJqhwk2e8W5hlZnsRHVsd1t2d/e8sGrV3z0lY948cFzXn34gvsXt0w7wwUyCw/zO14/vuZ7n32PT9++5s3jA3OpFBHcOOG3Ar6AK2bhUhveVStSu8D0bMN4PzLeRsZ9JI7exLhi+oxaLXjMB2dU9f1EKGrt92hDfK8OVwWXCtLMlAe1rYyw5gP13XLvHs3iSJEgqDctxLr7Mc0V4DASx+SoO2B2VqBGB1lozs5pScp8EtKsHB/tMR+VpYcZrsiFOMFHTxjNNy5sImETTJM1SfcAVBgxmnx3iNBOG3fN5irSySfne79HMpRmnxevJO16pNY7l9rD/jJQxIbNTajqaCjFe6pA9qaVak5gjKbTGgMlOMsmaw2nFVeEmM1Ad2mwK5Cq5TG50MixO15wmbs0bVZYrrt7wJzaHS74ngLrcT1rRWufJRn5ipYVTStbsUdtiG1TaxNyc+RqFOoqSvGNEgzeC0HxueGz2mMwx3i/2zNN90xhz7S9Z2LPyI5BPM4AoQ6kXiX4qpHwvToCineV6BLRR6Jb8DLjCbjeHQkZ0YZcMdlarUjJVKm0ls1hvRk1W3pHJj06wy3gsuCbEr2jjYHtfjQbrl1AsYVxs8+EoRCHRJ4LaCGnynJqnI7KnAytbs5bjEY0CYUfhbBR/MY+94P5VErftbXWepQFnaxh3UptzjYM9C7nTHmES/Q359f8vq9fX8kF0CtPv054WvExh638IkLzRhNS1lkVHUY0LLQpSMVe8yzMJ8d8WogbZVlmVKLd195+caqN3CyTLHeIeVGhOE92zgq5Qu3dmLbVSdEK6EqcOPsH9q/Rpysrggl/TIrUZj8wTQ4/AE6M9tg1HyKGhW63dlLSwpmeXqp5pOUFSrJ31jvHMEZ058n3nmFsOCnWfWXH3f0tz1+84Bvf+Drf/D++wYcfveKb3/wpnr24YXc/oC4ztxOfPnzMtz/7Hv/j9/4nv/+9T3j97sChFIoL+N2OwQ3U3Gi7An2Bi74RPez3gd1HN2w/2LF9ObG5H5gmT1KhNkt9LcXSQEMITFvh9lkwWme/wUyYsEByQNdc6CVUTpw9nBODy6MFC4ZRcIN0K3CuUAn7ZIU94uipW6XdGrzaikcfPFqMXZiPjWWuPL6B5QRP7zj7Cs6n3kFph+e9EDeRYTsw7keG3cCwj4SbgLuxLopdQ0eQ0TBt5zHGXTMWmxbQxHt2LioOFSUnpYiSxbhixj/zBgVWT0zgu426FIMvCp4qQopGtijO0bw9mCKMAZmidXauWznlhuaMr0Jwwu3iuNk69lu12djomD1s3erh1870gNasm2qd0NPoi1IXT4YY8MOAK0Zxp1in1xbQxTwn22LiU9OlOdPNqEGaNStt8UiDQ4NtUDahkjbKEBvDRhk2jTgJ0UVi3RCne7bDh+zCPTf1FRu3ZSe3+DNFwn6H9MVJaOfu1uGITglaGeNMDI4hOAY5Gv2dEcdihUrtHpAGWipqRnxGzM+ZtmQ0F0idhhkKRCtULil+Foaq1OhtxjrsaYw02ViBkIYLD4gcUD0xPyW0JeukDpWHd8rxCHNxlu00OGQEv3WEnTLcOoa9EHdCGB2+318GNRv5Is2Q5srS2ba5BWrzdkYqHaa+Gr7oVaFamXvrsUY3rDZYtXbBUbA3k7XTtoBSnAl0z8zVdVd5OJ6JDK3P3+dTMXRpqExPigyZ3dEx6UgV09OBI9XKUiupKksT5iYc8RSJlDCgYbC1wHwpbNifbdfUpHbpw1WR6ouHtMuMvxtS/PHQSd3cDQyD8fxdMAqo2dyAOHNECDsTua0GjK3ZOc2LkhfDpZEe9R6771yLpMlEmekotOx4/uKeDz58yU//9Nf4U/+Pn+ErP/UhH339GdttZIzCqR44lCc+fvttvv3Jd/jWH3yL73z6jodTYm6OGh3xdofbVFuMcsHRcNKYQiNGuLuJ3H7tlpuv7ti9mNjeDozBWRKrWshdKfb9fvCMO8/+hTOvN+dQbwGEdXG0Wag10ZK3Hapa7IN3zjRDXYUeoiNEg00laseML2Cxro/uXsFgO7N207uoLNRNh8i8JYEuMzy+hdMRHt9d7Jrqqv0Tzh3UZj8y3UyMtyPD7UC8ifidw+0Edg1ToGJi4vUe7he9JIw2vmDQ3irANfCI2TWqKIuDIo4qgk01HKIeZ7kPlNp63LlQh4EWgakvFEEM/vSCjmsWUKDG7kieB5SJphukDriaWI5PnKrjmDKTK7jaWAaoQbsnoJUoQS/SmN5FNeFKOOnxMRCChTe
KOjSD5n6+kxWosrQzYxXvEBQa1OKoJZDngJZqu31RTlKp28Y4KLu90IqH6pDthhBu2C6vuMlf5Sa/4EbvmHRgwwQsKAtrxi7nAqX45kz6KzA4JUpldIvFmHvwPOK0YZkuGZpFWrjuU6et9uDARG2ZOmfasaBzRU89LtFncix4b1CZZoFlQ0CQGJh2GyRscOMteEGlUMtMzkfSkmktkfPC6ZB5fKy8e1COJ2EB2sYjg3XvYScMe8d0p4w3jmEn+A0mUvdd1lGVPFeWGY7HyrwoS4Giq7Ks252cC9E64LoqUutxnk25y9fOKaOdwaX1EkuEddqIs+tx/f7W34AYu+trv4fVfkxaGqeDcnyccaFweHCUWijaiJOCeJYMx9I4VWVBSC5Qh0gbRnScaHHqBMMZ1RnagrjlDGXISmJcZ1FcCtV6KtbV5Y+FTmqaPHFwPTfsopCu2mzX5sw6x0c954pZq97nGP39b9VIE84JRBP1gjJuDMJoObDbb7i53XH//JbnL+958eqem9uJOFhBTOXIqT7ytLzj8fSWx+M7DsuBORUqExJM/ySDs6F5Fbw0vCjT2BijsLsd2T4f2dyPDFtPHAXfnXEVU7uXZrtw8c60RZtgEJVzPTxVKVKoapk+BGeuyU3O7CLv5TzUdFezXdcHu7JuzXp6rs0bLt2UeiyLqTuJE0C9eYSdA9B65zTP7zNo6UNo3yG+uIvEXSBsAn7yyOjMlT0qGjBact+SaaeYr3CXrlDXomejUNvjGQspu95J2Smg9YwA2++Hnpqq3fGg3+ehky4k2pYwOHQwZmQbLO67RU8NzhhUVQwqEo9SrRtrC6IWY3HcClsPpaoRFnoq8hkm4yrt9AorWWM68KZ9s9dv17eZoPZOqfY/9wXKaf8ZKyS4ioSLQPKoGk9yEPv342gMO2Pe7RjcnsndspU7tu6WUXZE8fj/L3n/DmNbtuT1wr8YY8w511r52K96ntNHVzwcDMAACeEBjaAbi4eDhAFcCSxwMJDAQyBhgAMY4CEMcDFwQEgI4SAESAgHB8T9oOlTp6r2KzPXY845RsQ1IsZcWQ10n7666FJfZ2lV7tq1d2auteYcEfGP/yPmKIu2JfxLtueROz6cxO2YDI/OSJUkC9iCanEdlDZUI722xV5qy9mqaGvYpbpW7dSwk7s71NxYSvPX0JrHhKzNGWt0XaRbnVE8uRY8YVa1UteVZVmZLxqBkbHC2YLnnBOSR0/YHnaOMGxQX0gyLM6QtipthXVR1oaTJty0KW6qTnx4Njn1x3NShMS/nv+3JT/4s1xvun6qm22LHhkHrBMtxsVvvhKaLK7fW/W6j18uHpg5X5a4xguaCpKNpQXkZxY7p+T5ckM4Eg9T6EbUqfOpbswIR7Dsf8wTkSsa6ZyB60ruV/r4Xhepu1cDw5gYB985yqD+BqXs0FCqZFbyqOTRtmk6RydOc6djrQntno8GKRllTBzuBtgPJJt48+Udb7685c0XB15+NnL3psDuxEUuPOgDb5ev+Xj5yMf6Yy68g/FE2c+MuQGCSsPE7Vg6iyYXGIpwf5s57Aqfv9nxxec7Pv184nADw9hoMlNlZpWZ2VYWq6yitOKHeb7xQ1fztUjJULwDt5GWJmwO6jSJJq6VyjufoLbk6hW/T9QvbhF3EUD8YBXt3avjpYOuOOa0gDVUnW12Wb0wnU9epJYeBY9//TQKw03h5pM9u/uJu//jht2bHeNnA3YP7aCsQ3WDVRXyLNBcStrMJST11GizsXxsLOfGcnGw22I+aeIHzyrmrLyMu3FknD4coYdeeM1hioRPKmbOvCSj4o8mhSaJJgOK7xza6ow7qwXTitmIkcAurHVFV2U5L3wYGkNrXG48VoOiQfj1HydxZROnYDpKEayApkaTFVhYdGZuK+dl4bI6eWaJndPCgJm6QNnMrb8i+0uroauhVbDVX1M1YR0GpqmwO9zw8s0Nr14f+OGPvuTu/hWfff4zvHj1GYebO4ahIWll5ZGFByonzjywcA4TpUJGGPLgItgCqTbf1a3uIr9qY764T5+Jos1dxOuy0toKy9Z1oLqidcVOK3Zs6FmpTzX2scpZlWRKais0Qdcnj6GQhfHWKPsj080JGQWkMZ++5XL+yOnpIz/5hSMf3s68+0nl/Vvj44N709bBgEoYjFMmYwybrzK6x2ceolAlv2ZaM4fGzsbl0pxpS8HKDvIIsrtaIPUTuo8SW5GRZw/6Apht7KCP2TGJtXalo/fJaywB4RdsF6a0U3gQpT5dXQvVssB8VHIxju9nllVZ1BiaIqWwtMSpKica65TRJOQy+XQ2DJB8J9pZjpadVl4IN5OMNxFd8FygR3lZela4ntXuX+nje12kdjeZYUhbNIuUFBVe3FYm4bYuBVJuV2/YGJk3fEWDnsoVdpEEw5gp40RJO27vJ27uR3Z3OZKfG6scWfSJp/VbHs5f83D+yKKPWL4w7o19tKzJgsslddtGgHqBHYVXrwZuDiOffLLn1euJuxeZcXBChdvErKy2Us2zhxrhJlHi5ye67iAHpfAxT2smt4JlNzB1RX9CRiEN4lYo4m4R1jK2ClLDbZnolqJI9ZslLT4q5brS1GOye0fcWgTxtevjObLRiRLD7cD0emL3csf0yY7h5UC6T+jeqKMr3QUjNaGtrhmxWABLhfXkLtfzk4ZzhYbrAb4nSgkVoYaORROE1Zw7nnf/vk41inBAdw+Q687ILPY7Tqio6gxJNYcO1cR3RI2gvFd/b3VkVb8zzxfjUlxDo2qIXbF65bsLZvfIk9CROONNIjC9ajxaowbzqhnddhUTJyGk7ZAT95fT3hTFJGrJXRBIpDQx7e+5vX/Jy9f3vPrkc+7uX/DixSvu9jfsxgmVJxozKw/MfGDlxMKJ1Vwf1RVUg7jTX0k4MUjc5kWbsyxrXWkaHo797G0tYD7/bBqWU7WhZ3/YRdGzoqvvGG11NiBrw9pKW2esZZDKdBaG/czuvJBGQJTL6SOX8xPnpzMf3y48vKs8PhrnU5iOZ2frWbx2qVOvg1SUy5VklLJPuECIyHULBqybFnHwiy11n744mTdbo2dFqrtgb8ubfuv1ImXQPQrj+pZt2vKLxuUZHrOhQ8ZK/m40iF1vQjNHjWqkPiyn5n6QZaXmhAyNlcysxiLmpro5LLqyT6fbF5LQT/bQxXDikaShAfPhz0LK1QdLfTZd6XWr8Mt+fK+L1OHW4T5g60Ykq3vT5eZ2WVJDTGaenBt0SddneJduzYW13kVrFKnEuCtMZWI3Hrh/vefu1cThPoeJ58qZB47tPe/P/423x5/wcPrIRd9j5cL+TllLoqxCaYb24kLzNza7Zf1+n/nss4m724kffn7Di9sdL24GdoOS0+pWnDaz2sJq1anUYrRkaDY/fAOdkhyMoiyQXTwJA4zQltB0WMImV87n7AUbM18TNL+KUr9RssQF5wegmSFzJa0r1JlUZ1JboDlEU6sF2+mXJA3Eh8N8henFjv2nB/af7Nh9uafcZdKrTLsx2CkpeWhdqkKbHXkM/iu2GvXoe5jLx8ZyaayX3l
mCZIuJ0ieqLpeSyZ3DSW5Q6pdLFKnkB4AnD0jAiq5l85WJT6C1+RSlksMsVSIiykP1MPO/qxNWB7RljifllBvL4hNEhCZsu74i29tFTkLJaetGVRpQfZeiK0vrj6AIm1AtU6VgsRtKZsH0akEeC8q9SpwaxX9yGcj5wOHmNS/ffM4nX7zmsx98yd3NLa/u3nAoe8ZcOPJAszMzb7nYe2aOXILcUA0GcXujiUIRYRShyeLwWsDqrpebqevCfJk3FqJnlzmqoBow3+IOJnqq6EnRi9LCQmx+Utazv/f10qcx9SRcBvYvF6b9yP52R54SJOV8/Mh8OnN+OvL2q4WH942P7+HpyXelbewvSUS0iLpnX5F4XPe3zycpVaMtyrJ4OGC1QpMSxIJwmdi6Z65Fqi9me67UNm3BNkEZ1+mJKDTqpARajCABr3WrJSkZGzI2FndxHUJB+3zxE0WqzbBmuDxVKsaajJINGTM1Fy4tMZNouwnM6emS4rn0yUyULaQvuSzAAxO9ADtC6edUX9Ftw2Jv0p6t4H65j+93kborjGOm9vdfxS0+ciKPSsmJQYxhWn18H41lMMriTClTQxfCCHtTroBImEsmdrvCYT9wuC/sboVhX6FcUDLn9pan9RvenX+BD+ef8HR5pKWFvFMOIshtYVW40RQL/WiXk5GKsdsP3NyMfPnJgfvDgZ+5e8F+GDgMA2O4aa8szDZzsZWzVS4oizTWnN1cJ5kXpiFutgQyhDBYPC+LWUiXFrsMoQ0+dfWFZqv9whGoKdTjwtXWOv4A5nzddcXOZ/S4UI8r69knmh5j32NxtvtDQHKi7EfGu4nd6z37zw8cPtsxfbFDDgI3RtuZi2WbYosvksRfLkcVV0MXYzlFkXrylOC6EqJXp2v3rW2NA9tQUsPj4pN6YQcPfTTv5NHY86ydcVdoOblQ01wMu1bxwVuiGMUkbup4oq3mwXnLQFszqSbOok7gmN0yCA1msb8s9J4ixQ5VUhgcJwu39BVjYdWFtVWW6vlHa3OnbfdbL4DvbQSHwySEqxvDGQ/mHCQzpB23tzvuX9zyyadf8vkXP+DLH3zK5y8/5zBN3A47BkkkGk+cqDxwtG95snfMnGn4jkR0IOWBkjITA4MIowmLnKmysMpK1cqyNo5PK+vFuDyCSCKlxDSWmCRjgmor9bzS1kq9LLSz0WajnozlohwjO20+eSp3XT0dt9UFQ7j5cGR3KBzuR/Lk1+vleGY5Vc5PC9/+pPH0YHz84OF8WuOeUUPU6Q4jxpgl1rlCyX2H64SjlFOgdkpdG+tiLKv6PkqyF4jnruddJGTxZrRoT1K+FqrUIcFnk1YfN2kRQBqOlNIQddu3zgKVnJCSfPIZCzoNsA4wDUG68K/XG8YlwgbHj1CauiNKAWpCx5G5JVbK1uwR6FMKE1RTt07qsIB0iYT6HmsNE7+EC4nVV6RYku16F4id4a/MQ/9eF6lhlyhjcq1MQAsdV06lxHVQyMXc4mVorg2J5sXUYTDCQFNRh01yQqWP5a7PIDVMVppdWPXErMbME5f2xKyPntYqZ2RQj/4obhZaTCjmAGOD0B95kdrvMzc3hfv7kfvdxN1ux5QyY8rhlmThkv1Mu4DnIfneJQpsCp+94l2NJxIL0hKyeqCcRe6L72yfrb/VkRnXULn2pGtxZGMdBQyqHrdha6OdK/XcqGdlPSv14iLTrj3b7jHYbqK8Gyj7geFmpNwM5JuBdCiwwzONwhkzVUOa9dURycTJEStepM7+vtXFgvzkzLctwTbAb9Ur4SNwMNIYS//kRUpRT2XVEJX2GJ9a0SxoyzRraBZaUod0RAPe6/CZT6BW/eFu5v48ao6AvC3nyq7sfnv2uH7a/qfF9Wi0iIZotGc5P6o4PGmuHROTzfE/fWdH78GcQ0rsSmIaErd3B+7u77h78YK7uxfc3t5zmA7sBp+IRCpqC5UTs564tCNnPTHbTEp7EpliO0qeGMgMDOEGATX8/lSdSFNr83DBxe15UhSpQp96FasVXRu2tPjsriXWxdpVaRFPc5mN08nff3cv8fdZaSxrpll1WYrA5WnxaJinxvHBOD25mLzWWNzHQ8zC7d1TnpM4acrp3t5ApBDRSjQorVk8YgKXPhmF1dHWMHXyg4bDDNcp6/kk1V0jtolLAumIHbrb1G8TSf+Q+A3ZCDf+da2k6+6rX1XRlNbkEhwGQ2bD5uauEuITuDXFepNKC/TRui+ST3ZR9ST5hlXF8Dxrr0JJjIR5ynT8CHkrejhc+P/vRaocBsqUscq2JE7BikrTSC7ZyQktMc3G4c6za9Ya+4dl5fjhjOSMjHlzN06ThlHtBfAI6vcfBihnbr812vSRu7TjKX/DWT+yDkfkrjIcgJaoKmRNjLhewrKwCWI9d5WUjP04cLOb+OT2lrvhllfpBd3gpbKysnI049SM8+q6hdngnKAmt+msNKKxdcfmHIQd3Ky0TJkkiq6utWootcb+bTbSaOglvLVEHFJzxqvbnZgi2nxPUCvLsVLnyuWxcv5QuTxUHn+sXB6M0zu4PHiezybUS/E+7UduP79nerNj/PSW9PKA3o2sY4EUGT0Xh9laU7IKqyqLJpIKutjGVlur71ccovUb0eJwUK6Hdq1xeKmQFNKqgOvDuDTEVsT8UAxkjbZUtCVabdQ0sA4rdWrUXKiloeKHUdedGSVuYPGOvs3o+QKrw6HNFtq4bia8m7m+XRvmzUM09nnEfqZuO6nVWW9BUNEt5lxdX1QVrS265UYyJYf+SBDKMDDmzIvbAy/vDry42/OjH7zi9Zsb/o/f+CVffPmKNy/v2I2CyMyZUxAljvyk/hce1g98fXzLeb7QFO52N+zHew67N9zZjh2FWxIZTweeo+Bfnjztej4u1EtDFwsmX5A31urkMBpUf81s7um0zZWkzba9mk8gDs82c3/C08k4n7xZmRdlt6tcTqtrmgQux8ZyMs5HfIKKxIS2FajukGEMCJMIBaGo7z9TE7JmihRyyuRUsJbRJtRw/FhXUDIyDCQZQEbUKcdsfnw86xrAyRWdq933462yeb3VQNRaQiLGI7WGNCel9EIl2sLIIvl9no02CDok2lCc/9+/ZnzUEL+fHmEyIu3Z0YbcKrkpsirajjS7oDp4npkFS9gCiTJDSNiwQ0xZ1ensoNsqDrzoZ2CH0C0QUwwFbvXyK5zzv8q68L/VRx4zecxo9ou5ZT/8JbkPnRTX/qTZKPvKeJOZqjGO6ph+g/XkxpZuFZSQQSi4tijh+cZqjcfHQhpX3n1IpMOFRSbWwyNrPqFDdTNKSxCHqlhiIPmBltO2LPRPRsbYlcxhKBzKyEEmJvYg3iU38SzSpRlLhaUKq0kEaHsn5DIft/5x42xfcBhhupod9jNNPnmJbbsMM2BxUaQubMJ2qXgER4OkYf2/NtrSHIN/UNaLcvqgnD8alwfj/N4PpOURlrNriX1qCF++/chwu2N6cWB8MTHcTshuhGGgSXasfLVYoBu6Ni9AFaylEHvKFmfRwkbZuhHYM6ZUp8s/F/RrC+q6+l7SMrDGTa9+I
EoDVtBFohiqs+uK0nZCKwOtgKXkj61QNS+YJmhbsbbQzgvUhVQX2lS3AuLx9raRJvzniyiQhtvQtDjIzTNrI9LOmXGmEb/x/GEe367mE2GwR9Ew+4wJahpH7l/c8ebNC968vuOHP/qEV69v+OSTV9zf3LArA0lWVC5ceM9J33OxBz6cv+Xh/MiHD0fWi4EWbl7tyNyx271hL3smCiNKwsW46APaMuusrJfKelnQas5ReS4FakEisurq+uouE9QWGHSMhaKxazWkmDeTyckutfluaV38UmjV98pl8KbwcjTms7NNO+X8O/rZIK4M2RiSMKQ+vUNPGhYVkmWSZFI40luTkAD4JNUzwEQGUop4me8UqT4px7/T8KxIxW9X6LG2YrG71r431s2hI2nfqRL5TyAkp39LNKsB/2nOkNqVvMG1OVpXyIvvqFgM4vxMVUkrsMxYS+i6RnqAFyoRgSEj7n3kELu/o/TR1ElsfgZ3c4DABDZ2q5oC73/Fc/57XaTKLpOnjFYfkXM1HzEzyF78gh6EvBrlXJluMq0a50dFz8ayNM7nBbWEDNm/3i7TzLVVpjNNK2u7MD0oms/s3lZseuJsk+cF7VfabnW7oFRIwQQrltCw+5UcTBwxh+HMAw8mKRzSwE0aOciOHYcIMQioxRJzg7n5zeVFKoWLgkSh8hvYKdZu1aM48rdFR7f+Z6NIdXbVoqQQxJoGRB7ZME4dC4peLKrrWZnfN+aT8vTWOD8Yl0fj+NZYjsblAdZjFKlYuqQhU/YT092e/esbhpcTw90O2U/YWByrbn5g6YIzt2Znc+nq5sHSpyYyFqOixI0ogckjEXfe6RBm/jyb64SSRgNDC0Swkerq1lRL80ycFWzGxbK10pJPT3Un1NJog9tPWTCqPKeqoZaiSC1YW6gXv/NTm9F9uCaoksw2X3b7ThENu7UGa1zLqNKshft7wH1cWXCdUWnNv8g2TeH5RuCTsaTEOA7s9ztevHzBJ5+/4csvXvOjX/clL1/s+PyzG+7GxGEQ4ESzI2fe8WA/4am+5+3xax4fz7z/+gm7jGQr2HAg53sOfMqeAzspTO7Qi3LEdKLVzHppLJfKcl4cCg2yRKf+N42WXiuyLrCuSIjqROv1gI4dXfe2TYMv4xU3/72EH6SqF6tWu4OCx/nMsxeoy3OYD66WedkHjiHDkMRRc0e5XP7QEom8/dPjXtrqLvmtJSyKVC4jmroFeBSplLbdDsQxnsrW8AD+QyWgBjRm2a/JGld0J09oCxanOQwZRSqpeAEQw7YdVcHKsybZrtdeM3+tcvEh1mYn6yRRUhXSrHBu0bhFo6eCibu6235H3iUvVGW47lK7XmrI/nuDF0+SOXvTlKY1SFw/nS/S97pIDXeJYZeQhptzVnUBbzaGfaYMMAwZLbCTxu28kidhWRr20Viq+fJzVViM4jIWTHJIEiqtVdYVyt5ozAz7lZaeOC4jh+SOz2Ufppc5+1KYSHIN3uV2MInvV/ySVyYSO8mMgegXJlqkk15a41Q9O+rpXDldlMsiLDWxmjn1WIQeP2QD7lqefUeFGFlxaE+FVRLVkhumro4H10swHovRRiMnp2AXNXIz0sWnDDstrMeV9bTy9G1jPjaevjXOj8blCY7vEssZzk/GPCc3cU1+oeaDkyXGFzvGF3vyrccmKMkX15e4/xbQSMXlbMiqpFXJtdOBBu/gkjcUUvqr3EMqrzsdtxiSOEDc+qpTvS0amSwNWRqpNnRekWrIah4CGLuvKoklZ5apUctAHWen+JYUQYyeOOtRh0JrK9pm2nyBdiHZGb1dXVvWi/8z1KcLydszYW5rhjQvQiMVw+3K2wb3OQtOW8Pq6ruzdXVSRlPfpYgwZmE/FPbjwKv7W+5u93zxM2/4wQ/f8OWXr/nhD15zdzNwv08M4hPQmQ+c7B1v6y/y9vyLPF7e8ZMfP3D80Pjw40bWHVOeSC9fMB0+4Z4vuWPHiCA8Bl3/Ql2VdW5OgphXWq0UKd4wDZnu/t5a+L/V1Q010wpWXexuzRfukVLQN+4pHE9kFJ+oxLYVzmW+Mkr72d+p5pdAEZ9PUYhzG4YC+9Fdr3ZDMC77ZFOB5rCftDB0XYw2w3w25rOxLgFqpUIqBcmdAp6uk1Q4thj4vjT7NK6bQawhqTj6kcQto0Rgve6FXABh5GCn9qlcsI17IVmwMUMbSNOAToP7610Srgm4bj5VnY6+XGA9s5nESvMJq8y9cVNa9YlRrUIpmBQ0D8ggQQLJSClOYMrJjZGzONM69uSFDkX7yqMtP50v0ve6SJW9UPY+eifFP4fGoezFAwxHyJYptTDde6TBdJeZFyUfvZKbBSurhgtF9TeyPdPbrUtlnoXzaWY6JmRQ0rk4BX5JMGaf3LPQvUF86bo1UL9k4SmxferMHgnD5HDF0pW5rVzmlWWpLIvS2rM9TCj8pbMLcv8MxLJVHX10OnYAR9WSL94BXSEtkEJnpMn1Y9rMcemLa6Ls1FiPlfXYmI/GfIT5KCwnYT0nJ0wsPWspeU3ptj5TQSZ38+5KeIWrY4QRjDj1IrUaXAypAUU2f6WI7Bq3vfLXLBW6by6dqmixsDf9JYvtgBsCEPTGdTVS2DxLNWRW7KJepGal4lEo6yTUUmlj83Z7LKRBIWc0+WLYRb4r2lZ0rq4fw0ck6cGMgV7ps0J1tUWK4mr+kDioewidw30B+YWbhbY+UVWH0syz1EpK7IbMYTdxux95+eKW+7sDL18euHux4/Z+YH+TGCdIaUG5sHLhbI+c6iNPlwcenx55OD1x/DBz+mjMD8IohWEcKXpgtBv2cufUc4zGk7tC2BLFeo3luruqlBArl5IDHjXW1qKZYHthLIVuT2xz+enmxtKpzEU8m6jEPqXvP+I1fZ4PWGNK3VJpr7cfwBbIV4qwXaL9ng3Y1Jr5jrU5k7aTdpY5TGUbHoUTnpg+IUVXlKDroPow49esN5IiodkUv4edAGRIS9HVAt/5s7ZNgL1QBe73jDiRkZKRoXhDtxZvrireFcXHc1Z8q45aaEhyujIjmZHUr80eee9RIFHw+hPaOgl/SP8s6Zlm2VGkFN5mqTx/Q36Zc/6n+lP/m37s38B449TzBkwmXrULjPvs5qmDIccC9yM67Mkfi8c5lJVFV8aPNUTcvkAlulpw8Z4mPJdnyayX5JPD6I7NdhDGakxFKE3Jh0TasVHB+/WVCOEb/gZDwLTm4/VMJcvChQtnLhz1zIf5gaf5yPvH95wuC5d5oV3AzNeQOWdnSJUEg2AD6BgKcA2R7gpV/OJZKG4VZO5xqCbI2S8aobqOLAkXC2fvZsjTgs0VfVhox0o9rhzfKesZzh8L80lYzo31AnV1yIuCQ6zFC1S5P5AOO2waWA3qonCsqFa3rkm22dvoUqEqsgS7rznKIxLQZe5ZS4I0zyK6OtA4KUJwxpsv5v0AWRveqeIQRzKfMtPqDLx88amKc8VOFVsa7VRZDE5qzMOE5gHdTchUPIjxsPeOsoxodMTNKqYzup5JzCCrQ9AxRYXnp2+yjM0AtlWoVWnxoHrhqbqS
bCF101UUs9U1SHVF19lfs2Xx6Vwyh5KZxoHXd7e8ennL65d3/PAHb7i7n/j8B3tefzLx8pWRdx/Q0jhyovJItSMf5p/wePrIT77+Md9885aHD088/ERYT4X1445hf0e+ecmBT7iTT3jJp4yyInbhI2cu9pHH9hPm9R1tfWCUmTI0NCWm/UgeCsN+TzfZnWtC2+pT17HSTsKi5smyhm8vqiMF7kkHafJufzgkxlWZbo39OYhvbUPXiJvtaosnwYR9di5Kcv7CMMF+L0y7xDRJDEEGzSHUuibqXLEGafVU6dOj8fixcTwLy5zRQZxinWMMS4EZpm4ldW1SXCeWsKT0yA2QqwVJicM+Jfffa4bURspKNiOrN2oiEmOf/3UrDokzDQhCakZqYbi8qu8MLstGve0NU1VY1ti15rIRj1L2tYGmfu115h9ID2Vs6l9A8SKYGkhCx+ZTYetLPgn7NYunZr82XNDLfRQpCIp3KMOLT1nu8i0wucit1hFGYXds7GZhOsKwjxyfOULErFPZPaMnd5qxFrR6oVpOfv2UB6eC684YskNk2QwZo+tJseAMi373J/OhP+M7qoSx0CjWmGXmYhdOnDmuTzzNR57OJ+a5scyNdfYbN8XCVTqVL+xHWmkO/TUvVu7Y7LuApommmaruVqAqHhKTQCLgUcQPcGlKqg2eVrg09ONKOzbqqXE5GvUi1LkQOt5oqHyplYrv4GRyuCHvJxgKmhI1TC/t7FOrZZ99TNVpxwFZSbW4x683oyBbHITjdnERxL7i6iQS1q0mIcQl4jnw6VKjcWhQVkgdclwMO1f06IW5HheWZlyasubV/fl2O9JuwA4TuYEMAzaoO1ykjFFRm7G2glQkNd+F9X0y/lqJdmjv+vBcrIiriN2DWcXMnT3UrjR004rp6hNU0AKTFIaSXWc3Tby6u+WTVy/47NOX/OiHn3B3P/Lms8zh1tjtG+SPrMxUe2CuD8z1iXcfvubx4Ym3v/iRdz8+8vhh5vJhwpYM845UbhjsjinfM6U7RjlQOGIIq52Y6wOn+R3r8ojWEyVVpziPwnTjgaHjYbfR6gf1JmK5rCySWUis5zjMS5+G0vZrGTrbziijS1CmPewO7uRRV+sZpr7qkivLu8RJpym4CTF9pRySpslNlssgG9+BuDat+vWJ+n5zOXn0zOVJmeewyMreNHnGmk8QdIJPh3c3PQTPkJZOE+/VVZz40ALMCzIUsfORDKn4nLIx25PEvRTTzFjiGh9Jy+pEmnkNlvGz8TIg1SrCYu7rmNQrpdPu8SYw2XWftsFBUaTUm0xUwyFFokD62aTN7aQsjJrJbPC89HytX+mc/39WHv73+BjvYbztoWvme5DshSntnDRAAXYJmwamZsiU2D0p0wnGB2M8rM7kquHWgIsgaZ3NksGSx1PXTJ0z88nfsRxFynZGK0bRUG6bkXKMswm3ChEcpomfvXfUWaJISWVh5syFk5041iPH5cjpcvSo9QXq4iJVGTJZM4nso3MBK27/pNlo4tqSFpCJqlDVxa1Vy1akbA2YIVwlEC9QvshpSC9SD5V2bLRzY3lyBlyds1stVQkmX0aKu7MzZuQwkcZCOYx+mKfE2gyW5ruYrCGKdcxBq8Z44Yym3AGDjtGHbkU7fpokPNv8xindnqUXqWBBNhNqT76NpybqxUlXyFWcLDEr7Vxppxk9r7SnmaUql9X9EzVl2K3k/YgtFVSQqWGTYhGWp1IxlsBRK1Ka66EdrQktDz5VKZvBtRsd20Yr70VKNTj3tjr13KKbNXcMd5jP3cRzTowCh6FwO028jCL1+aev+JkffMLd/cCL10oaTkg5YfKRypFTe8dp/ch5fuTtu295fHfi7S984N2PTxzfV9ppIllmlD355pbB7tnlO6Z0yyg3JBYnyduJS33gdHlPXR+xdmJIjZSNnBO720IZB6bDBMk1cYM2ajPyWBB1/5zzo98ctsai3uLQjl1JGoJDMcGwwLhPTAd/cVO44dOu91cvUn2C2mC/TU/p5IFh7HHnnZQX0Jb6KqCtzXenJswnZw2en4x59QbQBv8ZU7iYpGidu+WJApheSRsbPCa47iVBHoJgkdAhKlDpkGEgBhYQJV4rNJpUy34m+frWRytRkH11SHAZ3QTazK/f1iAaxSbCGruCpIWchJySN4qEc3/IDzexpy/SrpiqxGsfv28te4KA+nORJsQSkZ6F9dOOUt/rIpXuEvk+bR50msMo1NOtN5hUSiZNkNJIvsmMK+wssa/C7Xsj7xvyXqk1+96nn3yxDM05g7hWYF0S6eSFzN4bZTUWNcbFyDdKOVfSTki3Qo5pLk0WTuFsjt4iEkWkcQxa2SgnjnbkqE8clyPHy5Hj6Uy9JOoi6DqAZHLzvKScLEZso2V1X7/UPCJ6NuyitIuhs7GswromFvUwM2coZaxmWDIa7b6pxrlocPSYBHsKJ+oztNmLm1b3AdRssCu9bsBuQKaM3O5IJcFUHArJEtOSNxQtPAzdVy5wsJg4kvgeLYmQNBh8pA067bsKzfhCWoQasE5+JlxccRi4BhvLDUldV5JWkAWohiyKzUq9VF/2nxfqcWZd/XBqtjpkcq7YxYuUNJBpQHZ7rPii3HLzLyoVKV6denHsEH57Nklp3wWsRgsG5XpptKmSy8qyrqQhk3SghXtGkuZR6gJTAs2Z+92O3bjjbn/Dp29ecn934Ge+eMHnX9zxxec3fPFmz81tYtqdWOTEIu856o+51Ecezm95/PCR4+MT3/xfH3h6O/P1fz7y4avK+cHY5YHddMvh7hNeHj7nzf3nvLh5wWF3IItHIKrNLOsTl8sjx6cHcpvJpuymiWHMjGNmvLknDxNlOKAxSeUk1DyAqIt4a6PsB+/Hm8aOJU5Hx3ERdfSh7IyhCtOts0lTMlKK5OJF6SyFrpXtOltVtgBcspvIDtPV6i51jJ5oKrqQeFaXcagwn1xycX6CuSaqgCV/TpQZyQ1KCV89n4i6/RgaYu4+HWVxRMQSJIfmSAmxFcPlMWTFCk6Hz8lZy33PM8WkNcQ5Z6ApOzIyZLcJ2/n9bUulHQZ0WV1fty6kIuQpkQ8DMmSPpQnhvsQeTlYN+N02KybZFnfqC2bFYeoQxrMGA3dxWQ8FmhZkcAsxzeLIyU/x8b0uUgwCo2zjaF+skiyKgXfVmvFDZu+L+vRiIL9QyqvG+Kq6wmNV7CJY6GSAsKpJzlgJpp6aEwRkgfWCM+lGw7Iz0ZoYaQepitsvTeIWQwPIKNu+ymPQ3UFipbHIysLiEl6rrC3MRGtjra6q19VvVhXv3GzFwwbFqKLU1mjJE0NZzEPxFn/U6pTsahJ+AIJaxszFiWY+g2zmn2ph90NEWjhcoZ1PlHwHZeKxIX4QCLIrHgw4DY6te5iQQ51qW2Fyj8QoUN1gL9wnTULwGQFwEm4KogFlxrnVAs8x0rZj7riEQXfmi6bDrkuBDlOoXbVGUUU6rVs7vbv21tuAiiVnZukwuxUMGRviph7M9wwpWueIFnFnCtsIE96hx48TwulWjVbdsSOtiq1Kq+1qwmq4eFJ
sI4yNpWCjwi5x2O25vznw6sXNRpK4vxu5vclMO6UMiqULzU4s+sRpfeC8PPH49MTjw5HjhxOP7y8c3y8cP6xcHpTlSdjdDAzTjsN0z93NPfc39158SkZEUVYXTbSFWlfqWoNxmRnGPdNYGKdCGfakPJJkAnEiSJbm0oc8kMtAHorrFWvEwFhAYs1dU6SqTxPNNtjLoTpCXBsasWeksQ7r5X6oxxS13YNO0txYct/56O9VFCvE3yOnnht1cXKOiWFrw6Qiw4oVvx4o2YtsStcv9B2j2O5o4aJYmmzu/+iKad28Pn2SkusItTXgyQkXJb5eN8YriqWC2IAlI60jOrjDvq7Jr/PqfoRpzOT94P6ESVyWURXWsNnKbh6bspKI3VuSZ7Hw8UJ1PUUNRxbF5T3hi2TFMBLuBZr8+/wUH9/rImU70InoSoiu+mr5Y3HZteRU7XqXaZNfBAXY58TNJZPeNtq0Ih8UeVB47J2XR8oPo988Ekv72inTD0qehTLD/ARpB/m9kXZGPijDncOO+cVA2iXyTfIQv0FgJ1hqWK6cmAFhpHC2CxebWdbKsjTW2Zgvzc0wFwNJpKZkq+SayZawYtSirFKp4t2krEY6g528WC2z31greAQ0hcYO1YK2wa2QMIwOIzUfSXOFYfaLWtX/XN/dETBGZ92V5H5hY4GDixUtgVmLC7huXdo2QW0fMbmG2nODRc3FlOGY5BBHcvxdJWNSsFw21l+fpHpOlG0q/4i4tiBcqLkHmjYknm8yN2hVM+y587S6Caozm0Joa6DDQloVmSbYTV5Ii7pOJaY/0+QFWe1qOBCFMqQv1ArLbB5pflTS0Mi5UubVwyHbuhGpJJhyuyHDzUgrGYYddzc3vHp5x8/88FNevNjz5ZcvuXuZuXslpN1HalmY7S1P9VuO9S1vH7/idDzy/quPPH1z5vT+wrf/vzOnD5X3P165fBDapfDp7S33h1f84LMf8qMvf8SnX3zKqxe3HPYZ0oVmT6ztkflyZjkvrCdl2u0YxsTtzYFxGhmnASkj7gjiXEAXUSSSDAxJWYsyjI1hP6GSGK2ixZ35CwlbzYXX4bAgQaIoe2Fc/X6tqzcrLLadA90mDNiMfZv3FkhMUqUQ426CGo3D5qcX7iWrv3Hr6hZgbfYJzrOsGtpml4EsCYYBm0bSOLiOaIifS8xFuoBo2jLDcs+waMt27XcZglhFxcWxIgWJ67oz+Sh+LnlqsE9tWWOXVw0mhUWwyaA1bI3UAjPMqodrjh5AmrK4UHhe4TyTjgu2GGlNZPVgS6v+2uQej5Tjgo4dF9XJT7ZasDJ9X5YKKAPURErF3f7bL2kK/icf3+sipVnQEu2RPG/IbdtTaaDEzfDwu0FotxleDeQ1MX1IMDVn6NjqdjhnZ93k4hHeeczkEKcpcQBW0BmkQl6NPBtpMPKxIpOS943hPpP3iWER0iGTF5BFPOummUdhD405LyQRzpK5tJl5XfxmWJS6uoHlshrLohhOUS1SyDRyKmg2ajaC9OzsxBXSReFisGlE3K1B04BSqDLSGGg6RvJub+3DgSsnJK8enugcXJ8AMjQs6MESuLkgo9OzGYqn2HaWVXS2FpCeT1SwUVelL5AL3SuNeB971yhutucejeFXp1I8kE1cn7EFG1oQKDrGlgyRGjNWLLit+UMbKajdgtGi4+4f/deC7xSpLZ53gqZeIMX1MwyFLjrtGFN3wNhgPyMovVcBb2uuXVsWN1JNcyOPjbo4MUJaDdmBG56OpXCYhPEwYmWgTDe8vL/lzZsX/PDLN7x8sefzL24YD5VyWGjpiaWdeFy/4fHyLY+Xd7x9947T05n3Xx05frNwfr/w8E3l/FA5fmisx4LUzFgO3O5f8MmbT/n0k0/45PUbbqaRKQOcWe3EokeWZWFdGm0R0m6i5IlpesU0DIzZGZAtLnuomOVgvApiE0kmUl7J40gxKFXIkkiSKM0tdFJ16Ml9v8xzKScoO3+N89F1gZJ6w3WF+pwaDmDb7/VJSpJfl30vaOo0+e+8f90FJIIO2xo2Xau61qut2CIOrwwVXRXZadgSFW/W+nIyAEAxIYUjvAh0h3MLpbdP+L5Tsj5BEXvYjoKWLmyP55d8H+vVmKA64xddU2huRuy4ujljdgi3HRFkrdhZfAG4uthaikf7lJa2wpMG16ml+HLeb/ovvJn1jswC4bImyJSQZOSKP+//VUXqX/yLf8Ff+2t/jX/7b/8tP/7xj/mH//Af8gf/4B/c/v+f+BN/gr/39/7ed/7O7//9v59//I//8fbf796948/+2T/LP/pH/4iUEn/kj/wR/sbf+Bvc3t7+qn4WK53V0guUBLnVIhZDQjjo7tU145qWg8D9QG6Z6fMMpTE9eGe0nCE9uDOEdxneaaQh+zWioHH4tRlnli1Cmn3BmE8VGRppVxkumbJPNEtexJqQW0KmaKl3SrLGPFanf0uKIrWyro11tXh4HtFl9SIFUJKRyZuf6prCIsncrkWqRby6ISu00FepuAC1SaEyxGO8Fox41UQaOQuU1Yuqb3c8pj05XEfy0d+9ZHqRGnxHMxS/KbWPDU4R7xPuFVlxzEA6Y7HTcfEb2qJIdNmi9H0ayQkLkt1iJmcnU6TkDEWia03ucC2bzoT4f7phbV3Ff6VcRHfYC1SghdJFOJgXKTMY1k3I6MI023aOSKYzDcMkens5um3TxlMJqG9djDQrOrnLNrWRmht8iiRKzkwlIVPB2h4ZR3Z6x8uXd3z6+p4vPn3Bixd7PvtkwsoRLRdO9sClPfJ4ecvHp3c8HN/z/tuPnB5n3n994fRN5fy+8fiucXl0erVdoJAYy57D4Y7Xr17748VL9kOh5AacqZxY7My6rtQ1ICR25LRnGl4ylpEpjywxdy8SBYoU06qBjYiMpDxSRt+/5eoTRjYhV/Ggw9WQUt2FIfugn0bIkxNj8iikxTbrpV6kUvJcqJ7o7qoMhwz7HsonfDZT4m0M60bCvYitVy2lVgkT3IbV6kVi9UlKYi0m6tEoFizUjSshRJG6Inh0SNe6VyEkdYc7/zuupbQosMT+XRKelRawOjldi5RkZDBEBtyaK20/g2SiSKWgvQOX5AGtNcHZEYxUfGWRB18tWPICtRXIjRnC9mtHXgLeV5xotMY+rXoOmX1HWf0///hVF6nj8chv/a2/lf/z//w/+cN/+A//D//Mz/3cz/F3/+7f3f57mqbv/P8/9sf+GD/+8Y/5p//0n7KuK3/yT/5J/vSf/tP8g3/wD35VP8spSI8B7ECs431yMlQ8StzC063hxaoNgt74wrJ8kdApM57MF+0imGWSJvY3I9PtyLAfKYO/VH2RqmrbZFDVb2oDOAV1LK/kx0beJYaTkG4K+RbKfUJ2QrrLjHtl2it2m9gNhWVcuZxXzseFh7cLp8eVp8fG+aJcFuNSXSTYXSuSJof7xAkC3WaHZkHljkcDLCLOS8FsRG2gMaEMKCMWYgs3rQxjWStIrohkbGkwupedrkabdKPHSo+pH7saMrslC+Z41lx8lJuz/1z9hzVIoQdxpbqzF68woI
8ghsW0RSybfQtuZQha1ujftwtiotCUDEmb65TMaXSiFeqKLDNpnpF1dbbTUrHVO2A/iHpX3btE/+jNbMfp81CQ0WMRZBphBJs0pmSjMrNoi3jx79bs7zzs2ffpkGU/zRCmcWTQwvDigI4DuhtJ847URia75f7eAzPvXxo3tyuUhdk+cpnf8+78Y57mB37y/ie8+/CR9x8e+PqrB86PC49frVzeG8uDcnlntItQ54HCjjwcuHvxmtdvPuWLL3/Ap68/49X9PVM+ouIZUyd9z7F9pC4Nq4Vstwz5NVO5ZZe/YEzup1I5R/DMMYwcNMSjiTpnah1obSSVHdkyQ1uc8CKwtrD6WiJYL8gDSdl2UmZesNJA0KbZBKTP1N5RtGxj9wn+frTFoSvJsePKV6ive/R5FpPRFqMtoC2hVVxWEa4QpBkGVxOLmE8uVnya6LuwBGiQD7I4KUj8npBn+SrOCPTioX13Zddlhpo3RJ1BSkCI1pMFFZK6zUamesI2XRRvm6G25BAfA5oqKVe02081Ie8cSkxJXLIhQhsdxqQkhzFF3Dm2CVjdno+pbmYDTpiJ59RRjp/i41ddpH7+53+en//5n/9l/8w0TXzxxRf/w//3H/7Df+Af/+N/zL/+1/+a3/7bfzsAf+tv/S3+wB/4A/z1v/7X+cEPfvBT/ywX0ShAHdyLNw82+nUQQbd9ucOBoAVsEuwgsAhyn8mPhXxWhnNCWqIcCuVQGKZMGbL/3Wa06kI5N0UFVlelq9qWzaS401xajaUoaVHy2ijVSJO4sePeaAcYloU2NmSnzCd3cb48Vi5HZb4YyyIsq1CtYJaQNJBaJtVMS4N/v35tK3EahuBVc6ChUQCkYPjDWRz+EHLQX2U7MU3V72T1HYL71JkvQLMGbVZg6kUqX+1gcnSQ2vzPrBmyhR9Z7IPMu0nH1zv2ItfFjeGTjnWYRIJ2684VUlxZf7WgiYUuHgCYhQhqDgKEKNat1Gv4xK3VhY5VvUDVcBm/nhV8p3akWCKXTBoLKQrU8yIlY0NKxYZ6tYh9tjd//pnta3e3gDgMUoqa679OpcA0UW72kEek7MjLjtwGRttxuBnY7YWUZxoLl3Xl1D5wrB/48PSRp/Mj7z4cefvuzPsPM2/fVS6PjeNHZXn0IMn1IrBGqnOZKOOO/f6Gw+GWm5tbpnHHUAaSKI2FhSOznlja7HIAG8iyp6QbSrqjyB2ZgUwGnHzTEFaDVd1OSFdz7zhzXaJI8WTc4kkETSFnn+pTkiurrReZzGY620kQnavgpMBnzi7PSDqOutn2XnSLKm3XxmGzu9CYrSseHdKlA0GRd+yN7ZQRVaT5rjPF9a4q3ylUTsSK5kyjmPYlbwsZS5xq11ZFQ3MZh3/yRs+SD2loQOUdZjfCPNnHdY9/sq7ecusnvTbxvrp1ZxNoV0g0O2qFhiuOiLPzcvgCdgSi4NZsgdKjPoFJaDY7o9G2v/H/4U7qn//zf85nn33Gq1ev+D2/5/fwV/7KX+HNmzcA/Mt/+S95+fLlVqAAfu/v/b2klPhX/+pf8Yf+0B/6777ePM/M89XS/eHhAYBHlBlBfUOy4dAQO5Ptb/gBF2twH5uH8Fu7B02CnAoZYRwzMhmpJXZlZDeNjEPQ0K0TWFzPsl48ndMuYY66unK71cS6Cpxx7PzckAnSHsqNwxLlNjHuEuMus96ujGPmeCisc2O+ND5+WJgvjacnWKywWKEyBRVpjCVqRlr/uVwgu+FHAQxKF8MmFxgmyVge3SiSHSKFhGer29Vf379mzkhdg0rmbt5t9ILcVnxpWxIyhufYGBNOf/RJqoUuY5mdWlz9xhHt7hghfozYgk6qsBA2OUbvMzMipOKQoo4FHUd08BTULpzONDKNnUFRZaixM6gzWs/oGv565wVZK3pZ3WXi0lgvjXX2XeAvLVIIvqPcjQx3B9JuQl7ewGGP3R7gbueuCCVH92isqbBYZY2XweqziTe+sCTX9pUxUaZEnjJ5dNJOyZkhD4yHG7IeGIbX5LqjrHvGtvMYCS3k0kjDyipPrMvMsnzkcX7k4fKRn3z8hofjiV/8yUfevrvw/v3Mtz+pLEdl/mC0kyMBeS4Mltnnielwz+3tC169+YzXn3zKmzefcHM4MJSCsbLYE0/2NU/LB47zkVYTyQ6Mec9UPmMq90zyOQX3sgTfl84IlwbnVbl8rLTFd8A5Q8nZaeo5MQ0hGzBlGdwSzEW5Qg3nEc3mrNnq7iRldGPZUhzaty5Vsj5dENfS9WDX1QtW7QUv+aWaNYpGnBcarucahIl1BuuJzKRrIVR310+tktfYpVLDvswdbFxjMThSEXlTEowgaRYOAf3yiEM/Wm4xJWwovOnKbhhA7gNjHP+90atxj1X//SQR2pthDHaupqiPFgjC4pEw1qnvk1PZNRtUfM9fMm3oeGk0BKaklGFQUnMPxqYrqUPtY+zOxPWO1vH0X+Hj//Ui9XM/93P84T/8h/l1v+7X8Z/+03/iL/7Fv8jP//zP8y//5b8k58xXX33FZ5999t0fohRev37NV1999T/8mn/1r/5V/tJf+kv/3e+faPF26UZphj45XYuUn7ux0wjupopHNtSd0FSwF9lp48k7/lwTo4wMpVBy8d8HFxhGng9PiixKS54zpJJgHj39tnfiDUSK6wXmRLk4JJGPxjI2ytTQfWUYEss+U6uyVuN4qh7RsRZWGWiMtLT3KUhHpGafknIOIkdzbLwptLRh8Sk7BJCy02EtOeQXITJepGTAcAKAboUiup5WvAkt7hWnhS2eRgLaS5ObSsogG8jvRQqCX+0wRh0Dhmyk6r+XWiyRe1sM7hPWYylq52hL3zk7lJiLY/9D8Uc4PSdRMuoNh7nAepKG5hWT1Q18tYW5qS943XHdiSqdCr7ZkvWLLUlMTwN5N1Fud6T9Drk7YDfxuN1hYfLrQqrGIokFIYY1ml2JEx2SF8HTX4fEMGanBA/BtsqZkgtTnhhlz2G6Z7AdU7th1IGsiaxC0xOrrpzbO5blyMfz13w8P/Hh9MSP33/g8TTzi1+f+fhReXgQHk+3Xoxrjd2AsSsjIgPjcOBw94K7+1fcv3zN3d1LDtMtJbvdTmVl0RNP63tOl0culwtaM2KZkgdKvmfILxh44Y4hGMYTykw1Y66Ny9x4eKzUS6WdG9ME05Q4DG4kXAqUWqklXpt+HkqfqK4ODJ1EIRHlkYttE1UPpm39+hFDRbdrSYIdTnbtXCrgqaJ0SMSvgRV09QypOhtrF/Gaz+0SB253Psna3NZLEog6I7XFDjfLpqGiakxVccmoILHbtK1D0mCctuhyFug0qaQbdJmkH/t9r6oRG9QRBaEkz5AqBlPfEZt/NTWHYasYa1Jq9wUdICe3SaIG4zb72aPZF30puaRGklsxqbqGjkh9SK7Qd71kGA/qT1ej/t8vUn/0j/7R7de/+Tf/Zn7Lb/kt/Ibf8Bv45//8n/OzP/uz/4++5l/4C3+BP/fn/tz23w8PD/zoRz/ibM2jvbsb2HXjef13LCgd+e0bVScgaFJ0zF5MbhKs3vcUg1QTW
Qs5FYq4aWfHebUlnya0YTkC9TQse8rgNFYzF2Cq+C6m+tdvq7nKfW6skRjMBEMR2i55QoYap+qaphWH9FQGlAmzgsrk8J2mwC0VrXWjl6LuPGzZ/bhSiEKkZ1tln+Ete/aBSEakeNxAKvhSW0DMdVhmWKlYbbQc0EwTJwuUDFPesqu2IpWC8NuFQKqgQ2cJIGv1PKeq1/cm2FS5dRNV9e/bkhcpddsbK71IFSyKFDEEJq4haxnzScoalhqaHHzTgBB7I9E63FPdkNbXZaGa6XuNJMjg8F7eT5TDzv37bvbYzR67OdAOk9voJwUWsIWaEmtAXC0mcelw0rMiJcljyksso3Px6bdDf8MwMOaJQ9oz2Z4dByYt7uPWlMsC7bIwXx44zh95+/A1749H3h9P/OTdI4+nyjfvG09PmadjZl72PuCqBz8KBsXJC+Pujv3tC27uX3J7d8/hcMs07MjJYTu1hVXPobU6Mc8r2ibEBnK6oeRbcroly204gDSMASVTzeNi5qVxPDXWU6OdFAvIa2/F45eyhVuFXiG8EHjHFmebnCX+vAQ0lbJtkF8vUlfE2A/HZMFxCeapVIII0d1JwvEi9lkWouuIvXJ2XxSpjq9JCLES5kSP5oXDq4+Exs4JTFSF1KDlYJ8+g/uUSFYWvzjBd1QbJrnih0z1Pyj+3INOtJGDEqFHQpyAkjw5eTT/PDnAhGEeuBsSjL7lb0KQJBxClOy3O7ET1+y7qk7GECAlRdTJS0ZvLFsUcYu6H3u2jf77y3/8L6eg//pf/+v55JNP+I//8T/ysz/7s3zxxRd8/fXX3/kztVbevXv3P91jTdP035EvAH5ijaEXA0Ba10ixscKQPkNBDikqmiKpOUxRJcEkcMAPwiZIldAEaHytmMKCBk0zEkKehTEFY2aAoRU3TiUhLcS/qR/czmjyTr06LVoqNa1kMc6DT/oVWFNBk9CyoDnTckFLwfKAEVBBLl5cDKBiHiWKG1xK7Im8MOnz7KXu0FwmjxPJBU0OJWoaY5smXgza6t83pjQNaxhTwWInpON3i5T0U2UrUl50JLKQWPudfi1SG1/duFLfNPz8enRtuImTA6+YfJKihNiTnkbrv+6CXSXEwTlhY/EfaTVnB47qE+XYoAyknElzJV9qFI+IHSiJfLMj3+zJ9zeU1/ek3Q67v8f2O3S/g93gjY8IZv5cFx24kJlXmBd/6l3EC2wdPaEf85esi0ZdmF1bQ81jO1KuYAtmLkBfV2U9nni6fOTj8S1ff/xFHs+P/Lf33/L26cLbp5mvPq4cF+PhOLAuO6rtaPmADqDTSpJGKsYw7jns9rx5+Qk//OQzPn/9hk+++Iz7Vy+cOCSgNM76yOP8nncPnjW1zIbMIyUNTPkG5ICkA4lxA9kVbyYXWznNCw9PF96/P3lq7+kc5AjlcA/P/Six664Jc4JDvQhtTi6gxnz9iZF3kGcjT5DH63nuu2I//fvLve2t1GuIwoY+6JrceUEdZUFhOcK6GMcnuJxhmRMaCxuRsu3Kkuimf3I/gYDc4jzqTv5iErsic2/AFC20xvUflVVai/1tI7UZsxWzOaI8mk/boenqTVx+tvA0NOC15NOcOQGjN2heOMJ9n5BDVGNZYVYnbOSyrZMoJCSsxnoskVm4wRQN4T2+HwtWq0TRFfUz1CQg2P8vd1LPP37hF36Bt2/f8uWXXwLwO3/n7+TDhw/823/7b/ltv+23AfDP/tk/Q1X5Hb/jd/yqvvZZEpW0ecCJPIdovAva1qYm5MB32aKQM2YDEB1aUihKHpwZqOIGjI7ZRi5ULFLJIJN7zGkVSvVcmDIlt16ZgOoBiKSyWfmDxxBYvHl+KikqCkvDhwahFXd1aEME6pk7bSMOxXUrfKT40B43hu+sLKxSUjg+9KC+6AyF69Y5FSz5jseFsYMXKcl+uKfkUIM0LKm7d5h3mlJ89yRD3tJA5ZekkRIaJG9Pq6vMe5FUt3zpe4P+5vkfV+9UkxcbSXkrUta/R3jZSE5ONzcvUFfih5NZavcbSxkbBp9uVyC5ts2kYWMj5+I3YfFsIxF/jmVyy5hyuyfd7En3N6QXd8huQm8PME3I5A7pKTWMFdqI6YitBZVMVaE2o9aNgLiZoUpcBtdYCEWa0rZHo6kXKrXVNX9NmecFnVcuj088nT7y4fie9x8feTg/8e7jhXenlfcn5XEZuNTEYhNNdmie0LLz+6Rkh5GSkvd7xsMtty9e8uL1G169+YTbu1t2u52bGoubiM525lxPPF2eOF4W6iyUapATQxpwmpfTzLuLg0V3Xk1ZamNeKqfLynz2nLJxMsadp0H0tc3GwLsCIIF2CbrGNYNPWJZ6NpobmnamHnE2eHzL9VjsOiNiNyWZjRCxESciNdmaF6hlJuI5hLrtq1w+IdH7uajcP/cGuS82Y2XkhTF1d8H46E90a1r0u0WqmzwG5CfiSc+SuBKQzAtVN1P23ZuEw5tEYfBzp0/1cVRudbEprOrPr+FifDebtXg/kiMVte+UYjWg8bNsDaY/EQlgwQsU273ufdn/op3U09MT//E//sftv//zf/7P/Lt/9+94/fo1r1+/5i/9pb/EH/kjf4QvvviC//Sf/hN//s//eX7jb/yN/P7f//sB+E2/6Tfxcz/3c/ypP/Wn+Dt/5++writ/5s/8Gf7oH/2jvypmH8ATA7kr8cQ2tgvd8yQO8mS9kgcWrWBaHH7SncNjGCmtSKnouGJJKfFoKZFp/qa4Kg9iF5CK485JjDwIrZqzWSRTW6ZZolJoEl53MSFoTaAZWnLozBprWP5oSugQe50xoUNGh8CzTLadj1CQ5EZkKTlcY2icx4IMiTSWrWD4tW/bZEgaAzZzUailAnmHiR+smrPHaKTB4cQtqMe/mAVt3AYnTtgQRarj7YD7ocSOyapPUnXBaolft+uEFIeDT7nqr1XDC07Q45zpF/Yx4ZknKTsjKrC7/rl1QWQVxIqfInuwYt6MNId20mzkpZHOC/lwoc0r5WlGgmI+3OxIYyHfHeBmh93dwItbbByR/R4dggqfxb3WzGi1kmrDmGjpwlyFeTXmxYfAzQC6NystGKJVI1ahUZfVYzjWhWW4UGRgyUdqTcyLcf7wyPJ05uGb9zyejnx4euSrhw88XC784ocj71fh/Zo4yT2NiTbcoDJ4UyIFWwFbXWbQlOH+nsP9S9784If84Gd+xI8++Yw3n37C3e1dFKlGY+WxfeTj/J53D+94emy0deBAwyZhHFzW4CzSzuQyGo1qlVUrl3XheJr5+HDh8jizHM+k0d1ZXjW3PO/+qs+LlFb3jtQT1JOEmaogJZFLIO0DpEFJQ0S+hPNEq7ZBfxBTTzRcEvqpTjBtq2zZSq26u/o5Ag5PT3CZhbVGqq54Q5ZKQLP5OeQW1UJjojLrtoB+kddg09QaSIuzWzs0Fss0pHWt3IqLalankAuulUTcQqo3OkvXfFkY0EpINPyYXAIldKTQmbCbdRdQSaySaKE9lAKalVyMXHztMVZfcXjCgG3WYho2Xh1BoWlEhlynqa2P7PfAr/Dxqy5S
/+bf/Bt+9+/+3dt/913RH//jf5y//bf/Nv/+3/97/t7f+3t8+PCBH/zgB/y+3/f7+Mt/+S9/B677+3//7/Nn/syf4Wd/9mc3Me/f/Jt/81f7o9Dk1g8rri4JV02Ed3NCCfjPwnvChbpmxaco3fkbREXjImNYEfEFYhUjJSVJBHlZ4LxmoL7cznEXpexj8iYarIWkCcMN+4zse6PqTuC++0kewdKqJ8MmZxs2K16k5Bk1vAQGjMN8pILkgY0d1O+AQrDtssc3pw5vxiEITjtNYxz0AzaMfocO4+bioCmKlCSsdB853aaAPjHJUNwKKKY27yxD+Efbuir3I2sOPVbxXzePCXe1NWwed9uFT3SF1waD7kTRMZvUr4FYG0czV5trybRln4pyIe2HYFoGdGvi0N+q5PNK2l/Qy0rezaSSKOPoRWoaSHd79LBDb/e0u1vXK42Dw64px2HqDhEp79C8QJuwNLA0WFZhWYwxnLyfjf3+XJ8XqYD6pDbqulLrwpourO2IraCXyseHt5w+PvHtV1/zcLzw4Xjmm+OZx7ny9alxYsdFbqjTJ6g4xLftCyUWM7Yg2S2hpvuX3Lx6zevPv+DzL3/Il59+xqu7V9xMe9/vsND0wnF54OH8kfcPHzkfE1onZDAPNJQxClTuWwjMWhSp5tT4y8LTaebj45lzFKnxJjPdZpYmlFjEawzPW19YoUWKbD3H7i4l0ug7kzQqecyUqZFGl38EF6ef9w4/xe2ksO1SpLEVpucPF1jD5WxcznA6w9oSqleuu2u3OuTtBcoLQCNph7ohtYB0Mf9BmkKL5i95kOpmiNcbsuqWJKLV5RNSkRzM3URwJ5OTL4IVq02DVu+V101rR28kc/L7w2K10f321AXEjtRF9Ez25pNRYWhYUW8KEIYlkVryLL/Wd7s+KHSnF8/Li0lMcJcJuN6n/HRV6lddpH7X7/pdfNdz7bsf/+Sf/JNf8Wu8fv36Vy3c/R99KDt8YnJ8VsKg31+KEYccBvdjE29ZxaI4UDAGYAeAyUJKDVJ1eitCM6UmJYvHPqRwp+zuJt1KJJmQG770nxwaLCroWmJKGxz6MOd6mnnH3bUJquGqHReXZXHTV0lOGgh4clsZR3vpux9n7W2uxAIU3w/JkN0nMAlh4ezdDl6kHGyO1M4SQpMSXbA4DGopfL6Skw+ueDkB26WYpNLG9guqT/TRyS9cCxzHjfcw6XBewHzNDxq/iWXbJ1kMx/Idul2wmBJssG7zA6NrPiBSgi2jlkniycndf7FHz0NycfHakKlieUAvK1JGZ9ZNI8PtjjQOpPsDbT/RDjuH+YYSr1tApwTkmIo3EsTrK+UK9zmvhY66bh/mBfo6Qbpe6wr3VZqurDqjTanrwuPpI09Pj7z98JaPx4UPx4W358pxNT7OwjoMLMOelu+wtMdsx1XBisPbiofpAePhht3tPfcvX/Hq1Rtev3rDzf6GKQ84V2ul2YXzeuI4H3k8nZjPA7TETsxZsuJShm6zFZ4vNBrNGktrzGvlPK8czyun08J6WTmHU38z24h1HZKz7NerNk/xbbPQZkiDO3j3uBp3iDHSmMijuhNMsSuKJv36iSby+p9+T/Zda0en4/yunivpjxWauakx0jOhutYvhVmtRZ5ThBoiDs2p3+P+DaOpMuIwiYsip+uIpw73de8skYBle48W1bYHJxrdCLo/j7j/wv+pe0r6c42zgq6LqhBbIpPsU11yKYWV5pLKga1IJY1K39jo7lsQkeCEFg0sK/wyfRfn0OR2fvwUH99r7z54hRepJQpU5UqR6Xbj4zaJKJ0xVaANDvfZ6C9YmbffsypoWlllQUxppk4lCFBZcPZOyUpSKNkzU9QSdjO4IFMyec3XIqPOULNsPjVI3d7MnEdElJTzFmAmU8BmuyF2HgMcdsgwIvsJxhGGQprG616m48Yd6cxXixhnFwWeQRSunX8Nxg5XDVB2dDt5WSO8LMkWU27dFVxb7MBkm9ZcfS78d1DzNjX0Kpogj6HDGGIyk+/8OYti5PoWi5u6Fyr77tdUhx6tNiRldPBphnVCtJLWxZlVyQt3CrhUU3Sh6mactlTsMdKIj7O/XkOB3UAaMuwndBqou4m6m7DifnTboe/SFa72SsGqQhBzlmTrRrPx13rWnU/rANe4eBeFOlxUW2VpC+d6Zl0rl3nm6+ORDw9H/su3Jz6eKh/OlY86sDBwHm/R6QU2vYTDa0iTux9Uvy7IPlqk7Cq5XUq8+vQ1n336KT/6wQ/40Sef8TMvXnM/jGRpICda+5a5fc23H7/h63cf+OqbE23dkxnZF1jNyTQa8FCVBZhpXLjYhaNeeLrMPJwWPh5XPjytnI+NelJeXHzSVHPIKw+2LadkaFgSqjaWWZlPMB+VvEukm8RYMnkSdrcOvTvSJJTRX8c0OjW9VmKfGwemr4u5JvQKwy586cK5oTVoK6wtsyo0MpbGkG54gchxCnmNCeahKdm8idWAsxUnFrhDuje9PmE53020unD7OR4WBcqasw+EkJPQb6W4gIJ155U3X1mC4QKTO4e/ZL9XJPmjayFqu1Lee9EtwaBMK5i7ayxBzpDFb0mpuCu9dVgzuLqdTIFtbMuc3AYqxxiha/upTvnvdZFKTCScMh3BSo4DWwsgu5MLgioaaj1lxOHAAfdSicM8he6gLH7YYrTAVpU4i4LSLXjxSRbFp0QhGgZclBdFMiV62JEFROZGkwlJGt1XQZKGo7lPUjLFob8bPLdoGvzXQ4Fdwcbo4qc+xcT0kq4L48DR8MkjDCQ7yN8NUYeCjK45smDNSd/nRY5Pssy1hvg0ZRhbHk6H/Z4zAiDghyhqm++PsHnW9O1uvJvEj72pPZ5PThYBIx2riOlro3JLig2tQPa0WutuGZJcMZ8TNhU0JyiDQ3SxZ7Rm6KJoWdC5odPiHWrO8Ro7m1CHQhsH2jB44XtmVdNBDAls313VA/phe0m27j2Y7bFoD41JUpp0x/b4CwoWqbyrrSxaubSFp3Xlcal8XIzHmjjZyJz21DR5gRrvsfEWhj0uzostO7GUFyEnGCQxlczN7S23d7fc395ymCamMpBFEFGMM7M9cGofeTwfeTyeeXxasTYxJHOxMmm7vlUUpaKyUm1hsZVFVy5rZV4bl1lZmrA0d5pYW6K2mGwlebx4OIc4GcdQS9TqrMa6CgzBLEvZxdCT6xfHQ6Ze/LoaLwE5qpKrF68eounXrt/7aTBP5h0lgjavdUJVIs4+QR5IZGzbBcX7HPhMpjuFpG0P7k4vFuuC2NTZ9VbxLY+RRK+kkW3xqxuM168i2RgZzy8ifPcUEIM4XLIxelPq/pguxNUcO7COjbd+X8V1HOsD97sMclMP5TSQ0BNTw0Vm27fFe4h85/pOQfJIyDU2Jf8amKSEm4BVBl8o2owrsZ8FypBw2XSH9wqWd4gMID45iRnk4p2DJWfSpAHNmaoLTat3xkJMYpkU69GUA16y7N+jTl4MzaMqaLhkpuLi0dQ1PT7W9+TShFEED0ccBNslLIpU2o0+MR0mn3h2kzstlIKNQ1gEpc1
ssqvTu0+SqaGSEMtsvvkp+yQ1FphGGEbXSPUpKm427+BCP9I/N3UhXooLrbhD/AbxdXZdvwECn44mzb93sJ82I/Se1sn1Vrwq0n2dKxaHd3NChVWPDqBBOO9gw44UkepaZle+D5MnpxZBD6Pv0MbRi1QY2qriRepS/fN5BZxpWQNSJWdaFlpJWxIqqPsDholtP1xyPO9sbj+TY58ZNccX9XgfotHk5myk7CxKS5HcFUXOVGlaWezCuVUelwvvzytvz5VvLsKpDZzTjnV8gZY9tnsF0w1MtzDd+WvegFSj/VWSJQYK+2Hgdhx59fo1b16/5pNXr7jdHxhLCZ3TivLAk37D+/XHvP34jm/fP/H2/UKiMRVjfuNMWw9/NFQqlZlqFxYunO3CuS2cLpWnc+Pp3LisiaUVTGHVwtKcd54kuR4n9rNSBsjQrLCujeUiLHPyCAzLpJIpk/+kvppxqDiPTiHPo5JyCuKEN2qdJk4EppLMU3mLkEf/f6pO1a4q1HDdT8ME5sLlLuFNphTzopRj/+JDjjdj0qLfE3fad/uhaEqIKHhcIpPEyGjcBN5Q2GY4+/ye6Kd/uhYp8QtLioQuLM4qSZTijSTFm1IpGZsGwOsgHQWIe1U6WmIK6vtja4nW2YarkVxcRf9xJdicAoEm+c7QeVTm13iy+G9Bl18Lk1Tdk1qw0qTGCVi9M95oNAk3lRpBRswKMGHi6nqXtVngyhrGi0tQnhOmBcMpn4S9kEXQHgh5DeqnDYhktE3x84xOdKh+Izi31ncAJHGiRMuklsli5MgJYhAvUpPfhBwG0jQi04jcjDAM6DQixRf2OkSRGtJm6yK2+pUXBrO+50geQIYTCCyXa/bTrkTMRBAyuhw2JS8IEXOgWa7FSXrR9gtwY2E9A/Ola3+CritwneaeJ5OmfmikDafumyPb/r3SBYrmbTu2xNvdxCHUmKq0OdU9lQELnz4b/PXUm8kTS3cjfZ9npKA3G21u6Kq0uQVpw6febXcBLnmIgpR0dahSnpXWvrM1SAF9mCZaE5841C8N+m4qbtzcsfwgAlnAPLo2tDqxp2ZY6sq5zjy1yqMK5/GG1faoHGD3CsoB9i9hOCDjHsadf6Ma94gIIo0ssC+Fu2nPy92eN69e8eb+Fa/3d+zLGISg1ZN3+cDH5Ru+PX/NNx8/8u3HJ959WJhGQ3cJlQHS4JZQCSwpKzNVZha7cLGFsy6c54XTvHJaGmsrVPWlU20+JaklF5VnCVjdPMutKEqitcRaU4RD+p9H/H0so6MGGm4NaQzvvzGRBje09SIVTZUIJC8cJN04Cx0CNJWgros3sRRS2m1076TXSaETWnOKRGnp90MnakmYU1s0ZeZpwmJbYRvi7+cifvjjOXgWq4ruomMdJ+6jivCd/Y7fZ44iJUtkvFB0cMSG7C4t0wCSSCZxDxFTkYZ92RpNpn9TVZCakOY+pF0q0UmM/RwFcTg0pW0XnBLuabm9vlFgf4qP73WRQguiBZMQ6eLLfuAZVuQ9SvinPHsEkmzZO4dgAkrcbL2NtYQXveTbys2pG4mTRlz829xLz8bwxbPR/1wOuCH3IuViP4JWmlqOfYSRijnRoU9SQ4JDIe2KM/VusuueukdecvYeYXjqHrLmBI3W9WFKT9XqtikSbCSG/kj+Oe5S6VkAsdwUU5Ts90FrLlS2gALEbza/X2J3RDdy9fehExk2rDvipyV2RNvvxU7LSQWbwg3oJc47S0nNKenAhvQ+z2xpJXyb8F+XeI4lY7sJhoztHO6z50VKDc3q+6lBN51VHw7RCIIxh6I87uOqPbv+tPETb0xTP6R60K96c7p1v9Ib4WCgbqGL6oVKIwpCM9CMqg6dzabMQB13KHtIt7C7Q4Y9sr/zXPRh5xB0fP0eApnMD8dpGjns99zub7i/ueVuf+BmmBhy9sU8itrKakdO6wOP80ceTyeeTjPHS0MEptGNYVPOpJJiolQaSmWlsm4w5bJWlrWxVKVqcoudeG1aLwrWC2nsN7oIXdyCSDXRwnpH1UW3Dm1JMDITbZ8gGePFMTXD6eTWX/hepIQNJWFDS+I9664TSEDDhZRKWBcR95RcIa2YEFLqzhhsRAFTPIa+KDRxZ4Zs5KRemBKU+JyTF4wtofraA23X1hWG4Po/t8utu6X0BjoQjt4YdWgw9/+Iz+Y/sHiUgn82cYd36xNqL9AWmijZ4PcOQ4pInCF9xSJXODKbh0N2UetP8fH9LlJ1gRrddRJEJpABCLq7gZ+UQeWmV+7miGBV0qUh1WBZEK2hpYpDOk2uK8FCE+HiTglIrJi7HbkAMYgOMmC1oOPAWrPfeC1+jAb5csHWilzmKFKNzEyiIalCcDnYJy9YB9dKyZiwvWxeWh0ylDz0Fu7aXFVDqobivcU39g7S0b5wX9gXGDO2c0Guhw5GtlTsz5zZ7d2QVXOVfQPNDbfoVXLSwOQ1cE2X+m9ivxKvfx62n18G12+lyWmuKeewX/E7qe+KGhLY/RRQosIYTKlBnVXZDIui6i40PsmlZfCJtS5eoHKCyVlOlHJ1f45KYRJcmky4TXVoUUMk2YLgUTeGp2x7UAmmYkaya/DMJjTtWBk5ronjIhxnuPGaQUnXwbN/PczcgFdW0vHizzkLjYrVjDCwrI1ZV+airLtEenVHllsod6TdC6zs0d29MzWz7xrNDJZ4Xtkog7DPhTc39/zg9iWf39zzo0/e8MXtPfdlYBLBrZtnzvrIt/Un/OThK756+xXfvnvg/ePM8ZLY73ekvGecdkzTyG7KlGwhal6onLlw4tQunNYLx3P1fdQCraXt3lirWyWtq1HDNdwyJBE32h2UMmXy5AbQcvHJap2Fy5O/f2VyLVCZFMmJtgdLRpmNclLWRXyaDfOSbsVE3L/bYd688GlksEFEsljxxjCmDlrfbeFNZHHfxZQSOSenx4tANdpaGXYD47yizeFrknva5eLEgmHAHSSi/vR66rtPtqKKEUnVsX8O5qzvAnvtEqD5XSlp89owhGY5fKhzwPsE4iHbvS+AiUPwfhxmX5sUPyO3OBBqED06SNSLv5+hRqFGc9ok9nV4w9+etXS/3Mf3ukhJW5EWoj4VUsRDbB9RpLr/1tZxaPMuIJI+pUKqqxep7mMmHk/u6a/ELin52N/pc9kXglbxnRAJ1KE0zQNNs8MXyHYQMWekVtKlhJV/9YOPCgg2mmcS7WOi2icYBR0gTT6RWbGta5LcJ5TIpBF8b0YIjVef4BpKSkrOuolZbTAvikPaCkTieoeYxdJXA+8nu15DxItSLIyzxKRmFWz1QsUKiOstEk6rHwgnjHiU7GmdIYakeIebLPtC3Lrtik9z0pfOoTfpv/ZG71qkXFfSSNmQljwDJ7B7Dcq4Ny3b+iwmjKBO52Ao6rXTJuDN7mQhsdBOyb+/19IodulZoUoDlcKlsj3W5kO49gPm2fUq2yK7YcvqrtHnjE7mOMlk1KZUrc7mHxJpP2J5grJHdnts2JOmEUvFH1JQU5Jl0IyZMuTEmAdu9zte3N7w+vaWl4cDd9OOMaVwxTCUmVVPPC
0PPJweeXh64niemWdFbSSliVx2DIObMY8lUaJpUVaarVQWVq2stbFG2rRraxxCQpPT6quHP7ZIyOVZM55iSkqDB5E6nS6hNbFe3GXCTYCdnZdH380MzbVLJPGVdTXWRbaVbY4JIIVbg5i4uzn+GXMiR85xf3dcVn0/S5zHPQBQSkSr5Ewpvg8i9s79oa0Fkctp5Sm5k3nKtq2XWhxfRtocIzoDZ9sZxb6ykxY6BbxLhAwJraJtaSJmDdXmr31rW0Fzxxd/XmK+N0vWJ7KYPHP8vyZ+fmw+PH2P1etob/rc7Zy+9+3ndvz7p4yT+n4XKdrsHno2gGREh4CFYmLqb1ZSNCuSo21ThSrI0pBZkNW8YNhKEg8H8zfYo9Zbyk6Zjs+dFaYt6OhDHI5JHCrUjDWPaN+EcSIOgywr0ip5mUltJenieytb/U0bzAvVHl/2793JgSLoEFdl1us4vy2G/GdyqxTv4Eoz0uKWKioZsqJFsaHF4tRgFNilDepLpA2e8BvEfI+3YVnu7oBAUiObMpgX92yzL4psBVvj9StoEad87/xCZ0xYD0qMSarDll4JBkxd3yRdSd8rSsTbijmtmD6lkrb+RFr1tN1VvPFosm22XMfj3opdJe9CxriJutA1izczwW6SgDWkO8RqMA2DldehYXePL0ga3Ww2T1QdfYqKSeqy8xsvvEWvE2C8xBLZP3a+oOYZQjo2jIyOzaPmValF0CmR8s4L1HBD2t9gZQfD3q89cSaaNAVp7v4vxqCwH3a8uLvh0xf3fHn/ms/uXvB6PDBKMLCs0ezIpT7w/vSWdx/f8fb9Bx4eLpwvBhzI+YaxHJjGid04sBszQ1KKGI2FlZnZLix1YV5XlrmxLrYFHmrsEmuVZ0nUjtZ2tYRfH66Jyjt/yOCHYJ0T89G1P5KFshfy3tNms7jjQl6ENBlcQFaYTxbR7y4R6JBYaoEc4Ie1NnGmrnjB8fvfGyjXM8Y5k3GEJSePYs+ZUgZKKeTkajya+V5xrZFXttDdIwhEIgV7VUIr6PvPIOlsO6e4HqNpS8+vmyhe3Whb8Z/RJG3RRUqianJhdGrPpq4wlbZo+NX1YL6vi0IUrapPyXEfSHTfvUhxLVKanhWpbbLyc8Unr18LOykWsAQtdikaMB3litdikDyR1Tq20CoseNjhjGet6BoveKO3BO72kKIj9V2VpTFgsU4hj0yXIUSDOXYcltxWKPziekBgqsW7/DUjbfWdyWqYZi+exXzC2cd+aifhvecaB+lWz9FmXkWFLgDsZo7JIBUPjxNrlLRuk4QNBSlgZfTYjvhyEoXHK5Bs3Zf2ItBptMm/XzYlqzKokVQ9XBCFpFhu7n9YEjr59Nf2odMYCjpmFxNPZXtt5FkWthenHNArva2M5a5rpkTwVvi50Ndw9b+J2/0o5KpheuEQIsh12Wv4op3eBtq1a1TZOls0YLjNtcQgRJAWuqbAjXB42W0lbDiwrnuOa+ZxER5mOC0+Sd0O13sc2Jh8qO/c9LL4cxPDJkUo6B7WFiGKKcNg5KGQhsHNc/eRvpxzALBeCDT56JbUacc7EjfTjk/u7/nixSt+eP+az8Y7XuaRgYRQUVbO7SOP83u++fCOr9994Jt3DzwcV+ZlIKUbxvGOaX/PfnfgMB24KTt2ySixkWqsrLawtpW1NpZF3UG8D9zNgqkJukKdIw6jus1RMu9lhgJlEso+UfYJGX0/slaQo9seyc6Yivu7ZOfFMCS2xN6ava+V5tBY1a5b8qmhvweqKZhsGTMPBC0ldtWlhOgXWnRPbs/Z92ZyZVIUXwEkyWBKahkdi08zjRCbJ0xX/72Krxvwr61JXDph2a/b7uuk2+geMIBjgIEtYFGgOotUo9B3544qDgG2fi/IFX+6Fphf0hz6PIZITMDoxookSeyr/CuARFMc6dnRAKtcFYR+S/10o9T3ukiZKCaR3Nq5vfSlf1+8x/+jxd+xEMjhCFtVXyqZY8SWfCi1qPobbCpXJUQvELJhxv10cbzfF57qv87AFAd7TiF8kwgXwgkAy4CbixYvUsXwxD58vRbRGlLC4mSL+OzstzjI49qVTKRhGpKVpE42SKmRUiWl1SGvcJGwDmU9u8zl2XietlYtagjuFZbV1f659XM9IFXrwmmHP5gELUKanFZvQwpT2uRMo67bivRgL7j+WToMsd0sRDG2K93+Oxd734TJxuASy75Tii7Rnt98RvSYca1If91it64g2TtdEX9459sJIuFTRuu96/W5MGB5RNvAYolLE86rMFdjKWyhiqm/vBKXWxxEtjRndeWELQkZnbmoKuHeLzHJiTcdQ0zfsT7Qzk6Mb5JFwog4MUphNw7c7na82B14OR24zSO7VGLH0FBWZj1yXJ54OB15OJ55PM5cVic9SJ4ow45h2DEOE2MZmfLIwEISweNzHGpuGonH1aE8a8TiHb/2Fay6Z16rvjvKvXBIEBKGeIwJybrZ3zG7lda4GKV1DR+e1hv9SzZIixcosqHikuk+yWqIwsWujD5Vn0KR7DElyacljaRekm6Q2raLIQ4NwaFjCXNnc+2VhOhWtHlciFr43TlM519CfQoBn+g7La8aLmPoFwqxyghvD3HXB43f8Vvnqo+y5E2zrzGy599tyn9HZTpkLRrF1vB9VYoOsXuHpuR6Q/J2T3aSiVk3DujG1nnTFNqz18k2p4Ff/uN7XaTYNWzyyG9RZ2V1rLYfeH5oGnSPOMwLxKJ4El1wgmnOYqE7JcfREe2HtDUwYfHtMwmxSqKSZCUVP9I1i7uXl3J1cZjMu7A0oFEAaD45SSuwFl/O19GLaPJC1fkbW/5TcZ0XMmE2go0oBcxt/iX1SWZxQfK4glasVJL58yup+Q2eFvcHVEHbGAsSh8q8czO/oTfmDmzsPJEQK3owuHd/UVCkgDQkh933UJBhQsqITF0wXLAUHm8yYvTMq8B3QpOeiBsdAk6wgDjU2UWtIf396yf+FqdruLbKrnCaQqrRjTYw1bi92zY5pqFCSaQhIGBZSTjXXdqFzRZba1DEV7ebam2zdLI8hNnvgNU7WjpxXPd8bJX9XHmzVEr2M6dAH1y39keiealrg8WhqbwXJBs2Z5r4oZURijnxxpiBiz9EUIoXanWkQZrvT5MIORfuhx2vdjd8dvuSHxxe8jO7l7xKAzsMYabyxKyP/Pjy3/iFpx/zX755y397+8RXH2ZObcTynnF4wf7mJTc3L7jb33M/3nGfbsni02XDO+xqzfdoVWmVcDEzUm3kFSc/rSBrpp4z6zmzzkKaBIZgexajHKDcCuVWYAd6MerZWBYlzUa6b9hOGVQp4uy5jkh1NNjMaBXWVZnngPwUrCVn2raMLhlawWwCKSQKmZ1T1xnofVMTZ3oq5veMhsWW+PfoQu8UU4cIbiskyVcB4azinoINXcSvparoANcIhhiHFtmmKI1zooVDja8bbVNiOFToMDs5YWX0+y4PaNk5DD+MQNoyutjuM9nuGSeIhG8nfRAASwnpkgPtoER4hNILkVzhc4mjK4ow1zr7K358r4uUZoXsJo4ELTgk4
qEYj9a/j8s4buvTU3fgdqqPu1cHlCO9yq94MQKzHAvyqP6SSOZixpz8MxgMoOFjJ4NDgZQx6p1ju5hfTJ026x1L8ij4PrDHjeDt/DMKvTiF3r0HM2bBznk26Fvy3YOTJLp7gWPZSSyem2E2uxt8W7G4FHocvfUL1NgICx4wFxRUSc6IAsTKNs1J7s9ZcQfl5M8/DVhosdzANse+xCemLiJ+PqV2Nb51qAGfHVPshlLooaS2q0O79jvGNkGsXywWOyX/+zTbvq6FBo5Y0Ev2OHPBCRiSqlvD9NhuGhbwsbU19CLRDbt9wSackWHCdIfmnQtxa+ZSG0u1LSohscnGgpHp38Zd3IPMPTdkSUg1NMUuFBd+JlYM3wNiK2aFZCsEbOUSCadNZ4SShH0ZOAwjd+PEXR64S5kRSNIwO7PoB07tPW+Pb/n28QPfPh75cF55WowmIynvGcqecdozTgemac9YdowyIRFwiHlnrfAdPzxfZ5hrcdQLljSBJrQlUZdEW32i6R/9ZU2jFy8ZQBcPCNVqpGasqzJW9z7srSZ0EEDo1j09fbmu5kGkTZCmpJbcBLYF1CwjKQ1IKuQ0OpuWfr/12dvz7OjTVVxbmixQGW/sUncUycSU5WQks0wrhmnyolIjtr3hhSlely607XqmLoK37DsrK+JFKj2H+WSbnqxM205ds5NqNJKWnw1mfp932nqQhFx2EXT8Dh9lF/x7gY6/KXkrUjEDx848vnZQWKWfgz/dIPV9L1INSo0ldtet9AehU/GDT7QvWMzpVTUs8pvHZ5sokv0O8su7ExGC8KAJuh4rjGYTM5nKkGYkjNtsxBlZo3iBSO5e4c1Jh+j8Y6MdD35TmHWbEvxCND+KRHx5a9L1XQXFww/71GPU7cBVqUiuaPGJw0SdQICQqGHR0jDbo5qRdYqvaz5RaQ4ETWJfEvEDYmGgGfoVf2VJHSbMBSsaMGVg1Tldi+zmkD1ei+yz55OuwJe/PtsRd4XBkn63SKX+PtbqbMT2DMLb/FfSVsBSDXZe61lh5hNoQGa5eCZRLopIRXRF8ho7ywWLAqWtRRBkn6icnu5sOjxWuwyw2yPssfHAzJmneuG0rlxGdzOI2khPVMi9j1Foi9FEqbWRL420S+Sqbm4iQkEp0ki6YDpDEFdMEzDiMTDuOC8qZE0MZEbJ3JSRu2Hi5bTnRRm5T8VtkK3SeOKs3/C4fs3XH7/iqw/f8tX7B94dFz5eDM17Sjkw7u6Y9nfs9rfspht2w4Fd2qEyUhn8IDZft+sm7P7vmwVpkGpGqtDmTJ0TdSUo4BJaPMiTkyLyXpAR7AKrGXV1qet+aYzVNWZ+L2wQwBVabKDV/866GDqHWWqTLVLCzYcHStkhaSTnwpCHuA9j/xLtlWd8ty2n02oECIo6WUGVLsvqqyo/koKYRey4zHyPGMbC3QGCKj6d1fj5xQuaBXvOSsYGQYcc05SfYCb4lCVOm9e882KVPZ7HC1YJRM+DXXs98emPK1EId73QFrtNgybZ/0yfwiQ5E5rrueHNLQ6V911u6Cod8fnpZqnvdZHqfn2WF3/z1nwV9iZ3Y5acfAwnOcygIKuP1N59d6fEzn/p4667FOcGSVuM1yX2YCu+O4nixOIHXTZkZHMgl1SDrr4twJ797FtLCXFD+cTmEKVHiTj9O/UdjTmxwItZX6b2NsWu32fbk+iz79NI2yVkKA1rZy/eqYSIsjqjiYgxSQ61+D74Clv4xZY2ZqH1sSeb0+cH30FpsOh02+eVKEp5K1DWecZxKV9LVDwtNgib6OFI1pxNGNCb4Bk2EnEX1y5Atr8kCtkkukK9vtc4g6qbg2dpJLwACBV3urjGdXu8yArriq2KzotPctX3R+QCw4QMgx9Ow4DZDpvumJcTT/XMsS2cKyxNicE19i7Bwsd/3nWG1WARZTgreaeMtcUi3P3tSjKyLajNaJtdO2gZSxWryZmjawLNDFI45IFDmXh9uOX14ZYXuwOHMuA9tdK4cNa3fHv5Bb4+/gL/9e0v8ovvvuXrxwvHVqjllqHcM4z3XqBubtjd3DDt94zjjiQTKezHcthrYclNm1fQGWw1pELuZqXxvpom1tU2x/GphXNEcpNSGfH03YOQbhKpKhzZnLgt9rD9nw7rsoItgs7QFjeMbYvnROkqpAbUkDxYZhiczu+OtT2GJW8tpjVHTSQpydzlfTM8iaLovxv7IbuiAi7MtZim4iQQh/1sLFuDIjWmziZ+TTewVKGau5ITFO+pbEXKWbTeTztHyK2clIzKGFN+IBaaQvVi9JWwX4N+F0ogPe7n5DidSUKr9cEpdE/PROvSiRgSQmADbVuzH6rHuL8jzeGn+Ph+FykJoWoKjVFefXy1RIQ0+ZtA8P6jk/K/pmANi5gP/83Yova8KPOv67Ru1w15X+8GtiIL3SvQkjkGHkamfkXbVoSuBSNOUOsFKoqJRecXP78XqYxpHOSytWBcMwwcq5ZePDq7zrrfV4T+WdxcHe4LTVbSBWNA2hLFGoevDCCFUa1sAjyJ19Kvx952habI7Y2x4jojTeGyvM1ZQieddDvOPjVdgZkOosSvwyEDu36F6yPsbHqxiZu+ryCF57/x7Gs++7MbpLotvNn2XmJ+AHWqbzf6VPX9Zy9MslasT3Ok6DvihBEj5eS6uTLS6siqA6slVtPw9JRtYb7Bffih21ZvoBdwBmh16rijBPHnk0+FwcLxzl1immi45idcqksWplzYDyO3447bcWKfBwaJ6HKUZpVZn3ia3/Px/I6PxwcezmfOtbHKgJVMGvfkaUfZTeRpJI+DMwxz6IL6jtGuznS9b9IahUP9vU3bteTv5qrGquq8Ir3CvJ0IIYM45DcmZ+2NSup2R2EYu9HS1PrL4kWymqP7PYG3xbTWfHeXAkDNeXC/wDH2ONtumw2tMcPv93699J2TiMN8z2ibFsCgdpr2tjMNKC0mH38NUkxZfm93HhjN/FzLFg11XP1DdgZtzhFwyKbz60XK1wJ939vxwIBbA4aUQYLOrm5GG4XHAvojUJ2NwxTaTIvzwe+dXrBwVw5T5Fkj0hEuP2ieYYy/wsf3u0gxs00jqWBjxWxE2urMFTRozJVszWGFFWQxrKn7usnqBSq1sPfxTlw0OaxEAwpWaugNPDLdtQt67YiyM6c84TbTg98stiob1tCLktRn/x27DjOn07eMrTtMB6ztUAj2ENdDu/vwmR/WSZSUKoIfmtY0wvx8upCkJGmuFUvR59Uz1oTWFfa20hoOx0kjh+Fmipyc1C/IztBJPn64o7JAN15NEnBk6uqPZ+WllyK/QnvwmR9HAbHyXO3m4mqRa2S2d2QahbEX/WDwCdfFb6jpO4ArEnosVfchk94bp7hpnaggKWDFptf+JSAhdydRbF5Jc6Wdzh5kuYbDxpbS61qeLIaVguxuQU7UujBzYTa4WGONM7u/nCnIUtrgMsPcjLMKdnY0b6ogsfIaBgnzkQUV53RrVayZB340f2j1924aRu6nG94c7vjB/Su+2N/xskzsUiEjNFYu7cjby0/4b2//K//17f/F
L37zDd8eF04M2OGGLDuG6RXjeM94uGW83zHeDuTJmXfXucghXuvw8Sq02ajhjYiFiUHknWkx1tQ41ZWyJo7VGLUymHsMSnKBbt5n8kEY78UhNYV136AJuztzw/fSp2rQi6f41ifQY0LPhl3Mj45V0BYYGQOp7MhlR727gfFAvb2hk6+aBpOzXQlXqbXt3uvQYkvQ83NTGaJ5TdE4NGptURy8ofQ1gT8/DxmMLKch0h0s9/4IlubeemucFeYsRRWoKaC/kmhDnE+diWjEbsvh1bSspKqkWl1APOCxJ4NEgQoxeo/0GIQ2EibZfk/36cqtq1o0c9Xdzk1JGvdxHw6McOqwTWf5a6RI1e2oQ6rrlaxFh7wLOGxCpGJo4NIWGT2dCRfFIl2nLsBHf9KG0foU5R1OS7Kl5CISbgZ5w5n5zv7IN6XRS/kbulmje+FKtkaBMqwNUahG0BHaDujTUOC69mw1aX6hi2gUzbiRqrMdNbDsbBaJnp2eI0hbSDWT1iHYTIbqsFFnHeZzunlEVdGJDWxfyRe+Pf56y6nZitQzXcQGxMRVG8/DhYHPu87Un912LSf57gTltTIghpjivGmPrkH9Z7O4aegToFy7wE2mYJ6r0/cnfgkE2SQcLbpljEMxiiyKLRUuHfpb/Kdq6hZbdXRcKZvfyONEYk/KN7T0gdUqqwpVjLZ1p2y8nCCkunC3OVkzhW8w0bFm31+Tm6EoNaanrR1q/nCXeGHIA/th4m468Gp3y4vphn0qDCKAojqz1hOPx498+Piet+/e8v7xiacFVhlgPJDyDWV3Q5n2lP1E2RW3KtpCN5/PxSFIddTHJ5hVgwWJT5kE8lAMzTBbc09CNVZrtE5mSo5SpAHylCh7d5OYZjfnRWHYC2W6FnqaOcS4CDo73KcL2OrVJKk5OzZlJE1I2SHjDnY7bBpp+9BDitBaBTNSvU7lOXmBKv3m8M0CqLvgt+REBunwG0JP5k3qSye3Q4v3NLvQ3H053aBAyfRARsnNi0wJu4yNMBaXZy5oSu6q0nVa4VLuejv340trI62VvCxepAxycZPeZGEPJbkreYJdHPe4lbjnr0Wqw+/uadjIGlNmwHwSWOjGINR+Zl+vlV/u43tdpITlCplZguzUVweGZyA5fFVjV9N6iFw/uByqC4DcD/p+r5ERakxT6l+XWLTHSE2OqekZJGC9QFnQxfEL3afbhtCtlwIqtCA1PFvqWkvQJtAJ6o4rLSpErNY2sMy7sSBoZJ8q+yLfWU8Oa1Acr/e9lEObqS3ompA5B6vPMPWQSIu0XwnNRAqkwSdEC0gwB/nAbYY0LHicqTduJQU6yPlM9LoRIjT+3W9+L+edv7aBgtJnHt3+23px7K+FgUQkhdsaQSdMdNjcF71gpLh5/ADArmwyMwtafb/Bo4qo0DN0WBoyN5hXbFnQefbnOCqyLMi6uLwgm9/g40SSA5JX1jqwsrgj+jM2FnI9YM28xtUV5gWGBfLK1UyV0KglY0gezGnaXHeDw4g9GdxfF2FMAzfDnhe7G97s73g1HTjIQBGfyJtdWNYnPj6+5+37t3zz9i3vHx55tJF1OJDGA3m6o+xuKdOB4bCj7AfyLkyCxd+5Ph9b/zlwc1cvUhY/kzgDNoXYYGi0bCw0LgoXNRZTVjTYl1GkRiFPRtnjU0L1wRWF8aZSdm6N1HPHeoFqF6HNgs0GqyLVXRVMnDErw4407t01frfDdhNtP/j1b9CagppPGobvaFU9vSCSbyT7JaJq5CVMowN260VK44B202dn4SbzFYNfyMlz4npCdioh+hWXuTSFsW2x8tRueGwB+TmTz0dt36G7zVZ1BqOZT1DLQj5fiOQg0ljcyNeGKMwWqyt/fyRnXHozBewX+3EgBYYq6udN1ius7tE6bPlWhly3Hr8W2H1XIkJ/xsLmG6e+QKZeHKKpkYPS3QkITDV33Dd0Sd3tvOutetXvrLViMFgE/6aA+wqdIu5snR53kTYn6U5WSMxR/C6eW6Vd52NB7Mh+uLXZle91+I4+qFui9Hc6Ud0VfNut+aHfCQlK9sKVMt1NodOXk1ZSW0hrIjdXfyoFk4qlSqmZLEJpQTkXQVuNrzuEW0V4FSZxgoTkmKK84+p4/LW/v85I/sr0bqoXKieKXH/tr7/DKmwYOuCdXsq+d6u9+fAdD2benDT1z10wk3XD1bt3nLb4CUxo4jel5gLmgkt3JA+xYhVfwl8aeqm0s09RLEvsFJW0npE6kFoJKCdBOSBDQ5pweRg56YVTg4u4l3AMfl0SR5Jolmss+lehrQldk1OlTRjFkapdVtq6cq4XlxUwAg2xHIYAmTEP3O12vDnc8OXNPV+Od7zOIweg4BY95/otD+dv+Obdt/zkm/f85JsH3j+uXMqADiOl7CnDgWG3Y5xGxl0OmM8wWVG3caGx0Jip8c8S09HFjGXItMmLfUrjNhWyr9ihMo9GKcYlKbM0FlEqimUhT7C7SRwWWF4Y6whLMXT2F3DaJ4ZJGIc4/7bpzV9DXcGqkDSTLKyfygRlIu9uSVMUqcMOG0faVLZJvkUwYOroQuy1JN6DDpz0XVInFrmheTRgJs7M84UWhiCiWHGBdRo9302G4oWqs2Jj/2wW0Hi7EqKSVHI0jY704CSvDijkGM+H7AiEKjIVL6DafOIZLIIlg0mMIzSokqz7dCpZG82cYCahlfBg0SiEet21SXq+p0sbgmUYG7Dxa6FIORb6DNzsL1JXyFkN6yGcbaLedQh8Bzq3UFJK/3WK7rq3uOpkABfNXUf74GD6xdat6fvnwJa8efefKfWiYpXcu49Wr0WqgbQB0dFFopoxHdnMG8P8dkuppZGkb4abX8RhPSLBurOUgjHYKTyBjZj4VKbu7efehckPrDjEs2ayCkn9YBRJUSBD7NHL3ebcAFeihL8OPvTr9h49h/E6McK2N6P/netnhe3fmwdY8tnJIpLdew6HazG/YU3Nu0x1uYG7anSCih8mqvadIpWkl297Nuv1LjdFdEO83NWcIRoECqsNkjnk0eozs2JfhJMHxJyeVslOnmjiLvpcGVOdENHZhl2vZuoLfovpTyziPgUGMTLNr3ULPZf4905AkcSQMrth5GaYuBt23KeBG8kUGthKs5nL+shpeeLpGFEc58qyGjUJ4MbKKeWrWWrp+h/1xkYqFnZKjYrS4h/1PU0CDccRRhAGL1JJYAc2Gq1UajFWUaq4sMKHRzdpHQZh2gm7vXrqWRMX0DdPJimDkMPSqkO3z3gvcYaGgDUJqbjVURonZHTBOUPBSvY9MwFTd8o3fnN0UrWI0RJuo5ViLy3ispKtxElcmtd7xqG85PusnhqdsxeL3jh3cOH5k9DQBAb7w/ft/mc6lch5hSl+vpjicmi/WvMkbgza4JP+YE4r3aI74gBoFvdo17OB2EoixzHXscxG16cSAY1i/e92rVTYyFmcsWbPnt8v//E9L1IzW0yCJc/N0OQL7rYG3RzSmpCa/MCygPT6Xqkbmw4Zi0mp++/RCtYGkpYoUoYNK1qc7qqRQZVscFGsRaZUaH8cYlJSXr1
4JCPrjNhKriekepsstW6u7O5QrIiW0DH1ghLHuW9BcaeLsOeJcX+j0acIgysFX0jhtNUcuVjRynRblqSN0hbMNFxYFrDCoJmkiWHNSIvXt42oFS9Wo7kZ7uCaGAhIYys41w6ys36u753/2/s2t/SX2GNdHxIzclhRxh7GUtrWTg5vmb/RpkirHoq4Vuyy+A29+OvfIYxevKsmKkJVV8WnImFWDKmYW9cgbAzRgJdsaR6KeG7YHAWqNsgtdlgLWWeSDog4iYLdhB8MwszERQdONXOSyi77q2UCqfg0FYbw/ryNuKYTaS2kWkgtMyUPy6xZWWwhrYboE0gm5TnqemY3FG7HkdeHGz4/3PPD/Qu+yCM3YiQuNH3gVB/5+uEX+Or9L/Ljb9/z7YcL7x+VRUeaTsj/zd67xdq2ZedZX+u9jzHn2vtc7LJjG4OthJeEKBCRCAWDhLAUJbIACRJeIIhIIALGFsKJEJgHSECiuAjeQLxwyQsyikRAIgoSIhADMUSJZAmIYoGUkISUy3aq6py991pzjtF7azz8rY+59qkqn1N2OT4nNfvRPHvvdZmXcemttb/9/984UWhUKyzFaBVadUrboW50HhlUnJWdJ3aeuPLElSsX27i2Ifevdx+0Ka+GbSdt5CWws0bGbO88Uh+Cy1FNuTzyinFajHdeFmoY9VrYHwb76oyLAJTDx7SB79L0CHIMuttNaFoUvptV4nSC05ny7gtYT7CeiXN6H5ocHQwYaWdU5pDOVrjqEKs9nRVwWGGfE3DJ6b3BASZECC6nrrqGC5RF0HpbGyWNaoUWyGjYhohebFdiH4zHa+4Zg2Yj7ZYkIQlfGKPiOKNUelZTUUWqoBUFqTGIlyfFoxryC52eB2bZfrj1ZUuSqOuU88xWRykpw1Gi4qg/PuUuVB1rDZWdbRmDKNPk5mPXZztIJU3YZ3Nwr8gCpmTUd2zfc3PNCgJXwVOzn5EZRKxxayNVE+Q0KvSmAGSGV8db4Ol3V7JT6q5N3EL/NnIaq6EbMLr0WzYoccW8U/uG9T0fOceqB+6VERXiCXmfVYkzE89VLpbPhaz+wzRobViy7PIC8YQHI8Cr3N29NGHIWU4aU68ylOmZXJmxTgsFqZrTU2PU7OkluygzSEGsRZUMc4bocZYmIM3k9j2vqcgpwHYMorxVUgpPwgUcpyScSFFF40UUlFnJWXRidFVQuxO75mJEBiksz6+JIj9Co1RGaDJwmIL7QLBfsbSfCU17nmTMGKHn75pOGmnLZCUr5mwkl9BQQKsGS8vnCnpb2frCpReupXAlWDMbTpu16XGrUB+ILeV2XN+lF5amXtuJwerStW9+xbiAbRRrVAvOtfCwLLy7nHivnXivnnhhhZVBROe6v+Zy/RJf+uCv8aWvfIWvvHrk9dPgaSt400TrUtoB+Wp8WRxzo6x0wvZ0Pb+wc1XPjZ3Ndq50tursK8QLDQMV+3E9ghQn4BT4udHXYKvBbtO6N+2cCqyLwbngL1Th7V7oZmkcYwfpZ/ZNIyL1w3G78kxj1StN5s/Lgp2bplSvonRPhcdtRlPSdkpWSEnQiTBVeS3JOLUl2UFw9zPONvit8o9aE8Ep+GI5isSOgaATmlBVLqZp7LvIOleJvTR92sWRMCVsYUEfK46J7duq2qhNPWeaqZL10Ph4i9SATkLShLbJhCtg37EelD7FzgWre47hSY3hJG0dLZMEq6aEZUJ/k2Ry+AF+/PrMBymLYCJeClJVwjxHVUffM2jNxmQku0sQHi1NT9ehDG9BQStNEsO0OWtjM6JEXrjO1IFoFo2Et1NJZ/la5CYnqmmn+Ib5TukbZexY3yg9m5o9GFE194cMUtSbW3KerloS6qQj4kRhTAfxQ8VtRPPcGA0vGr8eZSTkKchsAhIVR00xREG1qjxwfrbpLr+jXlokRNBqBq3ZDZ1AWfC8ntffbgHqFqjSSPYQ+WaQCrEERQDypLKno/SBvdtha1ayUU4X7V7BSVAf234QE0hrFy9ia3qY9Cs1KMXwPKc+9V8JNUaSJixpvDFygu+EauM46kqemMytxO+beOMexmgnNl+4jMplDK6Y2Jdw2E3eApVlNm6HDx89+1I1KBacLFhi0Hpg44rZFas7rQyaBedWebE03llPvFtX3isrZwoLXRNzt9e8fvoyf+2Dv8aXPvgyH7x65M3FueyFWE5YWSl2C1K1BLXOIOVHT3Swa2R8bOyWQYrO1Tpbc/YF/GFVotFyhHnMIOVwcvxcGafBVoJegm7O7LJaMdZVs5/ihen7w0Tj38XMHmF0zUxJBGqIN+CR7M2shKJSTRMBWBt2qnKRX2+stYn2RwnBeAhaZEJYofPiBl5zgnBTkLKYTv7GnFPFHPEBeG1JGa8aO1KSfDFv4WdaPTzwDFLsIupotHunhogbS7UDzjTfcTPK6NrjqIzZm5o5bAbvkntBhLR93jslnHA/rnXbOtY1Mt6Hfs+qJlt7yVl0BqW6/g7aB7Fbb4pngeqw2vkWCFLt8YHSTlh/BxtnfHtfuozRDsKDegOeGS06+XXA0mHZidNO1B2WPRP5UDlLTQjYoJuYNlMLZTPjn/5kaT1UBsRk7eVgRTz7BHIvKH6leKf2i2x9RpfewqF2KHQGG9gTw0ZqaJLpg+dVrKAZZRFuXkuSGGoK7FyB0RcYV9n4mAE7xkKNjhXHK8RaFNiz72IH3RR8VbZjLcnj4Tq0kQ4V5yaH93WTcWZZDu0R4bnJlwMpvwWtg0SOtGS30FWQFm3S8dyE5Xcsb3rXIR05OG8kW2zbsW2jXDfKdlVg6jmYKOJgQB3jBJK1dUgCmmyhosnwxktRtrpv8HSF64ZdNmJTQmGmBId1VZCNRRNjT43y4h3s9FLzneoLqAu0s/RXrPT2Pvt+5cuXL/MiJ+95qBpaSdOKFV6cFf/2gIfVWIvRRqX1Qu0mEkwxVuAUwSk6by6vGRGU8S6nF40Xy0u+83TmO88v+I7Tifda5QHVqB47T/0DvvTq5/j5r/wV/r9f+AI/+5Uv8wvXC09LI955l/ryXcr5Je200JbK0grLUlmXxnpqLGtlWSrNLIXifiNMZD212c61OtsCfdWcsCiV0uSwYAasjVid0Sq9FC4RPDlcXEHoUIzWZPgtQWwqbtmNuBhjq/QBew+uT8F+NbZXsL8J+gVGFyphZaFag7LAqRKnkk4pTtT+rOrKYBfipR5JWBHDxdrC7GLWRY7ftbW8lypz1EwZVczRoYrMMQWpZ2WzJwNw0ouKu2aAdQnHSx8iLaWuINwFdYdQuTb7bhZMizhjUKJlMLJb4pWvdZAxwjEv2fvObC4gigwBKF1z9uhCZjDwkSiP3CusTLBCwbsyVB2C+pYEdWQfzW/35Cfa57+hqPApW3VfKdsJ+gP4megPTO+5OT5BzeuM3iW3x5bGq4sTrUNTP0EHUxVaeEhUOxLSsdzkUi8A3FTZBGHJXLPsHVncGopjy97RjnmOqU9z1DJcnmEO1dX/COQbVyi47UygMj8A0yqfYmnaWo8gpVlPjgzeYO4CnpoyzXCxoz9DM2x9RvioCX
[... remainder of base64-encoded PNG image data elided ...]",
+      "text/plain": [
+       "[figure repr elided]"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "plt.imshow(img_list[0].squeeze().permute(1, 2, 0))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 38,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "[output repr elided]"
+      ]
+     },
+     "execution_count": 38,
+     "metadata": {},
+     "output_type": "execute_result"
+    },
+    {
+     "data": {
+      "image/png": "[base64-encoded PNG image data (a Matplotlib 3.7.2 figure) elided]",
ct2enp6WIGVmA9dBsU3OpJqm6Y9leno6zWgi58+6koGymmOUsyLmvVa2XJ+P8Rj4O1rSVXrs8+O6jitS2IbZgTnnlZ7SPaQsu1o6IkCKidNQF6ZHe04cLeAxpTiRgvGxqA/mEYmNgR24WmPHSLX0PqYs0o5AqsRnVJbbUwYd7b6LnIBS7Ey2qk3+798YKWI/eI2Ag6AMRBAYeEkVwQnLR+3wMRUEoTNC4PF67vCXl5f7AOVORmXpkV6zjM1sIAD0b9wViptWMDjhYEyBJTpQnCs+z7bBc8gyVPPEc8EfFeCVMhiWF/sbJWvkg3nNdA5lijqG88TzjLJhPVeZr88XLusrviKfEQWaJRp7kGKBsDD52g46RydUODSMKPJQx5TCZREDR9vYNisZlnHiJ4Djk8TRKXG7+F1Skiwq5XIZmLDzx3Z4XIpXBaKKT9V/BFD+mx0RbhNmfcicChMDYFsHpMqyTHjpBX8zOPoYMDvHNtoEAcgPZkIMkMwb845L8ZyFuc5HDlwBPzth/s7Oc9s1c61AKQMp/EYbjdrm39k1SgZ+9h9Yhsdacy3bCTeStfV5GchmNPYgZRZHlxztsTNjReA19OhaDytLpNjYNvOqzmO5bMeemQ1FPRz1RwZWcuKRTEtjiZxFZCwR2PHvbKs0Li9EIBn1hUCEx3CZI5NpiWrkh+2pnadqrjDo4vuCoswIx1FaoqkJCHBs6BA5ukbAUYGX+qidtCgr1Assh2WjwI/HoJxmzTxHc5gFIny8tBmHy2fAyT6PL29wQOCEQInZEW7n54CAn63JlPnYTF4ZHREgxcSOoRRpMQDhhyMedGQlsMJ+kBcmpZzMryrLfGVKriiLgNgZKWIZRw4vUtZSe+5sImNAvku8KvL6CiD8POtJxK8iJcMSSGVtojPCjAgzGLXBx+8lc5Bip1XSlwhkeIxKZiV5ZcGNqse6VLIP1hEeR+QHlIPG/iPdiPScv9uAVNafk8pGmybewevtYBCGZVxfvBzqXpZ9R/1gO09pkFLKzucU+YTiQ2l9O6aKppWCqbVy3iJd6j9yFko5S7vKIkBWbSr+IufBhEqN33heAVXk9LGfbGcc1lGbEpDUxpYMPNERqd1ObSkLTHiTDoMMz4lnUNPT0zY9PS2BCsfYNE3/XjvcxMEbcaIxIh+cuZaCKCWDTI84a1YBGcomcu5sey435oP1wfvw/xg4YFvKB0R6UgJBM731XPmTDAzVi1zxGGbRThiIRzaGco8CIvYhykYzgC3RWIOUvxOHL2wzqExMTPSjSXbcXhY3HSBIYbteL8peMkV0ihyW88LH+Df+L7XN5aLoNHJO6pqH2jEURVmZY2LjxnJR23xOgZgaP/fpzila48+cn39Hzpb7i7Yss2yisZfGzGND/pQu4nHuO9I577fkjJSclPPFpTqzwaeE+HfmpBncXT/Vyoa3ictZKpqPlvWVzZR8gJJhpEeR7Lw/nycVePE8KpBF2bAte7nMhvA6JmfhkR7ymJWNe9s1NNYgxY+X58f3sLKpSfDrTrjxwEEK20PhMyhyVIQ8RUCiDFxlR1nUoRxlDfhE/fs3gw4qaHTBPQMpJpQNyrgEUgxYpb58ftjBqnF7eeZPOaBsTrL2I+fg51COCqRK41UOIgIqRRwV85xkQZEKCPi3Ahp0oNhm5PRZd1x/GHiUs1djj0DKjylSPiYDHq5XQwhQKBf/7ed5mZjBFWXjsuZMCANAXh5EW1PXtlg/Il+pxl0ri7EGqfn5+YEtr1EmZRanoQ5S8/PzA0CFIMX1lYNt48jY4Sild1IRiHJuEan66voFXzw1O7CEMD093d+6PDMzMwBWyFN2gx+O08yGllbR2CJgUkDI2TE7Y2ybZcK6kl0nwHr8G8enzmcgw/Ui5+H/+QkkEY+Rs+XxKZDk+uocjy0DPyVTnnMPJlTmqepiQKmieiVT5Ffxhk5fOVaUp9t/VFaNAUkFIZFNs974yhAH0k7IF46fg06/T03pnNfDeWGgUlQK0jM9iWisQcqX+3yieJMDpplmernLjcUBCkEK6ytnWIqkRiEFUkqZo91/zJ9y/v5fXQBlUMb+/FqIO0qUZZtInyMtHieSMmZuk6O4aH7wGN9LpsryfChHw98188/OFAMDFUDgcSVTbgPPMd+Zg+AxZwFeVB9lkDlvNV9cvlaeGJQgKcDF4xEfGe9ZedW+/4+CATyPH2w32xSjArAoYMrkrEj5nUjGLJ+M2gLVWIPUwsJCP5rAb5VJMTFIOUDhs6s4AvVJUiBY65z8O4tGVFu93vDz/KKoGseET6FQAOcfv3jKywcYwfuTBRCkWJF5XCwfnhN3rlE5NjZ2SL1eT75VlIGHMybWG+VwWAcUseHiWJRBYxbq8ub6CuxVQIGy4ygYx6EoAoPMkXn7DGDYHi9Z1wCVWp5SFGUekdONzisZOO+ZPWftqWBKOXeVKapyakz8CpJoJUfNH/OIxzDLZPnzUiDKrgZoWLfagJPTWIPU/Px838Hy0lEGUqhM7LBUdoT1ouiubTSCTl492wzJgcSzGF5+Y0fhYLuwsGDz8/PyQazerrc3Pz8fjhedK/at2mM5uDzZeavIzGWOgMkOPZK1crIqU+J7Q5DHjNhhKMBXjprL4nKLz30pqlayxeusXg91iB01txcFEgziqKc8Dxn4sFwz+Sq+FJ8uB//2LMP5cVmiM1RBSkYRuKrrhHgcx6Gu3+BxbB95RRBAPfXz6kG8vFqSyZr5U/anyjN4Yl/qN8tQgV8bGmuQWlxctImJiX7GsLCwMKCQkVIqx8jlo0gT63Fb6r8fU06NQQ/LcX++3OZA4WAxOzs7ZJCuhJgJRNfoMBtTkRaOAbO4qakpySPKi52IkgnLFA2TjT+SLwcV7CQjkKp1oJnRK5DyNtUYXOYqgsXx8flIN1S9SGYZACigQidTmoOoHXUuIsUfOjfWK/zOonUfT/Sg22gcKFv/zdds0K79vNpwhB9sn1c5sG3XV//P15Cx75Jssd1s6VjJBnnG7xqgwjms5ZFprEHKM6iFhYX+NyojKkEEKnjOKXIualJUZMWkLvRzhIoGxEtguNw2PT1t69ats5mZGZuenu6DFALVwsKCLSwsDDhQlxFHhwg8aPSRHDKQwvII+qio/tvBi3n3+fP2eBMGG4rKXvxbfVAvapy3mm+cR3zqg/ehLmbjPPI9TWpZGfvKAF3xmQFcDcBEfeD8sROPiB25Wk5uQxgsMc9+jToC9Wx1BeWlglvvN7JlzpZ4R6wCqeyaufuyhYUF6Vt8/NhGG1myzTHI8zKfmnMVEPC8si8bhcYapMyGU1izQceljLtNm6ggUVslR1ACKf9258aRUea02Kl5fwwmkeOMomVsD8vh0wuiciybTM6KF+SRd4Dxh+vif7XTiI+VjA1/syNCkGJwVw6R5YTz5vUVYEYgxeWwPF/XinQ0Gn9EDFR4TPGmQDNaOuOIW33jvKHjd7vxeYqewoHLYwqAkTCIwh1x2GZpaS8LDGpkruYX5ejBUak9tg3/Rnn4MQ5wlM9k21G6m+lVG50ba5BCBfIB4/U
Mvlhuli9HcHusgMoQsZ4qhw6OwQoVml+zwXyr/vGiP7bnijszMzOwXs98s+PGusg7XqPwJdWmOfAcOSUTHj+XyZwvOycec2mpTgFZ1C4bPbbBTh3B33XEQQoN02+L4GucbPSZk2TQieTK/LHzZ4fD11B4vNwm84T6zLyY2VDw4n1OTU0N6BnqK5Larabm0dvGjB7t1ufF+1W3lqhlaBXQ9Hq9/vK634IR2TLLQ9ks96V4wG9F6nIB98d9sB9UPsbbVteCVfveVqSjyB8GNxl4M409SHlGgqk1vlvJd+thysqK4m2x0mGUrK63YF2fVN5QoNpVkZYbkbfLb8iMjJmNEnlYXl7uR37eHi75KWPEbBSVzI0bl1Sc1NKLkkFmdMiPMlIEKB8L84Ft8G8u43XRwHBpQjn3qampgQ9fa+Ddg7yDMDPKzMFlDkDpktIX/s/L0jxu7ofrcj9Yn3eUYmaD7XJfKoBC/eRNBKjnfsz1f2Zmpr88jrqL+hNdt8ZbWnyMvrx+1FFHyQxNgZG3yxtpMsI63o7Lli8R+G+er6y9Xq83MD6XC2eh/jvjVwX5TOxDavwA01iDFIKH2YGInw0HlYYdVwRS6lEgqk12FKOA1NTUVH88DrLu9JSTYMP1bxWZe2bk0X3k1L0uLztxGTdcbM/HkYEU/q4BK+zXx4eOn+fNqbTzrC3h/KknkLOj4gwM+Y8ciPfDv0vAxXxlzovHo8qotpUcIufc6w1fX/PjGCSq8UQAxbar+EedRfv1YMLB0/nzb850kdCpY4CiboBlOSqb5O8a54/fpeClBH44Nyzb0pJ6DRghnyqwZvtoY59jDVJHHXXUwAVrMxtYlsLNFBjV+oQ5GJjZEGi4krsDYIXBSBB/o0JFAMPkjgbXu33ruPOGhuXHfbxmNrDsh0rg0eX09HS/HC79cXvsXKNMBEEat/5iO/6tggJFHN0qZ6WiXwZUlq3LUJXHMeM8oiPi+9N4OUQ5TTy+vLw80H9JFtguBzQKSDjrw/bVPCq+2bFkDgpl48RL6x6hKx7QUUXZBYIT6oMK8pi/6BuzOQVSyCvOJYKeutal5s+/FTCosiVgVnWcP5Stn+eAArMn1N+pqamBLF/Jl2WiQJPPlcC4BKpIYw1Sft+QEztdBxc8hsbh2YqXxclB54RRIFJkEEg4oVyfo24EXFzKYCXAY6zUnEmqMTEvXAflFBEDVQRS2TKpao8NVIEUtxHxyYATRZ4MCuyIOQPOghB2IDjHJZ5VWzhvCpCycaEs/b/zwfOCFAUoOG62C54jfK6eInRiUf8KpLBOBHAqaME5YIBF3nF8LqdoOTWze3WO5Ypj53GqMeM4WF7K/jiYwvGh31NPj0FdV8ATyZ4pA6taoBp7kPIMwQe8vHxg2yY+NskFz9dk0Fh52yhGTbyunCmNk1IeJ4+uUXH89fbeHyoDgpRHQzgOzoZ8HL6U6ADo5RjU1LJVRBz98RJbBngKfPicck58DmUc8ee/a8GF5cDXnFQGwf35f7xOggERg4aKSvG3zyEv6aHeRrLAVQOUITvZKGhQhP17FI7z7+DkqxhcT7XHfUb64DcXMzgqUrqCMmUdQmBUy4ecNZXsH+0I7TwKABjkVSZV69Sd0IfhpRD/7ccVTxEQqnFGFNlpJIOIxhqk/OIoTiI6YbNBgFLGjSDBiodRCEc1NYbux5XTx36dkLfoIiMqM0dHfg6NDCNHHxPKxnlRjhb7xL5xB6Vy2igjdc77Vk6EjZX7LhHOCQMMjktFnvibl/sUaLGDwt+8DI0BAl6450g1Ali1zMhyQzlxcFMjw7YyZufrY8JHjGH5zKErx4XAgU4WdU/xrQIg7s9scKkMdZnnhecay/OYMMBCcGXwwbpYDuuiDBDwInmx3kcyj/Q+Cr6YB3UuOo/E1yufEiCFqSoKTTknP2c2GN368ZpohY2tjRNgp+jKqCI0dEx4/UgpHBoUGjUe47Eh77VOCXk2GwR/VD4nXGrDNpDv6HqA+s+8oEyYV5QhO5nMYNW4sa1o2Ufx5v9VVhD1reYM+VCZFJbnesq5jEqKHyacN1/yY1ll1+YUj8qmssAlAqgsIMExsSxLAYHSax8/B1wYmLENMrCxTdXIhvVElVd9ZD6idtw1usbz/JQBqVJEaXYgMsZrWH4surjLvzFKyaJCRWoCXUEwIuOlRxUxcUSHjgD5xKjWzAaWZdhxqDEr0G6apr+UWrpojP+V7NhgsI/IebODUwaKn+xxT6pPbJ9lykDlGbziQfWDfSCw4/lIpmp8TCVALxE7dr6eF40P6/P1KHyNRnQRnvnnfryu2oCQOUMPAFey9Z/Hj/aEPDM48zHMkHCDk/eFdaNxYTklAxVIOc9+CYTf8oABMG8OcvtxXefrjJGfRF4zOal6EY01SLHDVsJg597rHdjyjWWji/VR1IXf3k4t8STzZgYELLy5lg08Ug4EgSzq5HpqLAx6fkxlBCUHy9kt86jqqXaysrzhgR2eMrDIwUcfbFvJSukFRs/IC5bP5giJy0QRataWClCysUf9K5tRy3M4Tv7NffM3y455ieQW2XGmb9ifaoufuM/+Q9lFBFLMU7QcWAJk5J3nzOvi803xQdx+3q97clDhu47VmwYyWSoeMTDH7xIdESDFjhiNBMviBVckvkZTitYyZ6nKKooAlJXE+cuikShaxSgyi9LQKHGpkC8ws0y9bkbIHz7nDuUS8cZOLXKa+B+vH6FcWHbocHh5NAIebws3MbAMlHNV8xzJMAJSdqAKqGocRtR/Sf7cRwT2vMxlNrzDM7IfBiisl9lRdNx5iMBYjVGdx6AP20Y9Yp/D+o3LfdguB4EuP74mmo0R5YZBILbnWdT8/PzQgwPwg3rtbfm9Ycgn60JEym/V1ENadZB61rOeZd/61reGjv/ar/2aXXXVVfbKV77Sbr311oFzv/Irv2If/vCHW/f1xBNP9DMNVAJ/wKqn5+z0XTi49MfC96iDd3SpCNjMhhyhosi5KrCNMrvImfR6g09GRuKIjy/iMphze9gOt8nHI+fj41N3zWfj4+tKarmVwV1takDgRvl6H0y1BuT9o7PgdpRhs7yijMMJHT73VeJVBUFZ1sBjyhw5jg+X+fieRG+Tl1S5HQyMmEf1qRljxDfzr5yv940bK3hOs2tJZjZkc2b6sUaok94X7ux0njgQ8LLYt/s+nwe/75L7d/L28SHdTdMMPHbKZcG8lALXTOdraNVB6ktf+tLAduivfvWr9rM/+7P2H/7Df+gfu/jii+29731v///RRx89Ul/4+ng0EnwEimcTvFabRWushOgYlBExRZFbaVIUCDDAcDn/r5wXnscUWwEgAhJHZ2y06nc2To7wGGgigGLg4eVCjvp6vV66+w2dgdcpRYFqjDVzG82lqlNyqiyfLDPI+FP9ZEFFjb4qPULnxcCEulVyZs4L6jbqRGZ/PIZoHnluIpDCtnhzhLInJD6v+ODvaJxRPzwH7vscqHg3LbftxzHD8ud0RhuG8Hq48j1qjBxY1NCqg9Txxx8/8P/KK6+05zznOfZv/+2/7R87+u
ijbevWrdVtzs3N2dzcXP///v37zeyHmZRfHPXIoWkO3KvhgnEh8wVBvoeA7yFiZcEoCp2lEjwrXu2EIJiw0nGkh23yMgP3HxlKxKeKcl22mcEpefA1HHWMx4mRJF7QxXlEw1Hv7lEyZF1BgMfx8hIgy2t5eXhnJpNyYHwNLssMuS3kS2V/ESGQK6fHfeC8ZaCJY+QlLz9W2jCBbZoNPocPZaTGoo4p4Of5wzFHS+k+LuwDAROzjUhHatpEn8Ly9+tBPA+sQyxH93/z8/P95T1eZsQsbXJysn+tCu9tW15e7m+a8MdCzc7ODgSJvpFEAZKiUUCqXtNHoPn5efuzP/sze9Ob3jTA0Mc+9jE77rjj7EUvepHt3r3bvv/976ft7NmzxzZu3Nj/bN++3cxsIErgXSdZxBVF51yWHRun9fifKYqSow8ravZhZ8BZZPTh8qodPo/fUVs1/LHslANXTsSJN0Rkc5c5edX+qPPQds5KYI7fJbmo/yUqZWk1FM1x5Gh5jOo/zqt6ZmZprtuOU9lvSQY8L6rdKCCp+bBMIp+U6ZeyVb8WhX2wrFHe2I+D1tzc3MC1rCiIV8GWmmscXw0d1I0T1113nT3yyCP2xje+sX/sF37hF+yZz3ymbdu2zb7yla/YO97xDrvnnnvsL//yL8N2du/ebZdffnn///79+2379u1D1xdQcLyMp0ADy6gI3KPWCORwiUxFnVHU4DxmCq+OZeveirwd/I6OZcbOhl3r0JxwmZIzJZU51hpuBE4415Gz4MxTyQ6zKc7KMALO2oj6x3EpgEL9qJV523lhHWDdyMbT6w0+pFXpRgQmbHtIvMoRlUN+ovNRedaBTO+UHip5qf8oS7ZbP86ZFcuIgSOaJ7YjBCjPaFmu+M45fxI6+j/2r73egWeAIsDgciz7LuUTVTac0UEFqY985CO2a9cu27ZtW//YW97ylv7vU045xU488UQ788wz7b777rPnPOc5sp3Z2VmbnZ2V5zwtxv/8zQCGEYQL1SfNIwicdCdsxyezJsJTE8WTie17vwqw3BlwXUUlo8FyJSNv6wDNhpeZlJOIjrsTZBl4u2i4SOgocWkPM0TvQz1bjp1Hrzf4cE4V4Cj5MbCXAJ6DGG8T9bvNHETOLHOwDKhMbBMuE77e4baET/dXgQXbizqnHJmaM9QnPK6yGxU0ZM4U/+N8sx0ruSvww3PsLxSYoPwxyHO+UJ/NtN5H4OS//eHWvPmFr/H7MqGXxzniQC4C+ch2IzpoIPWtb33LbrzxxjRDMjPbsWOHmZnde++9IUiVSEXSLAx07lF2pSIX/2bBcxTmxGvlrOyqXfUby2YRVAmkInCqbUPxHY0HiYMFxRcHECUwVe2rsfg5XqKMnBTXLc07182clXJYildFkVwyQy85SM7gFThFOsLZnVn8ihgGIbWyoQBJnefx8RwpmUXyYNlwJhPNR+RbGKxqAQrngtuKfFgGrHgcl+O5b/Rz+MFX+mDby8vL/V3OeM0KdxyqIEoFbyqBqKGDBlJXX321bdmyxV796len5e6++24zMzvxxBNH7ouXCFjBPapwR6WUwLda4iR7Xf/mSIozAeQh2mWmKDIg5aCV0UVtqrbVef+uVRomdpiR0Sm+MieOvHKU7BQtlzRN039di0eB3EfmmBUg44N9I14zp1ILOsifAkAlHy/DD1BGR4zROkbd0XVd1Q/yhTLk9pVcOBjkyL7GXjJAiMA7AifO/lT/rMv8W8lHAaAipWu4oYGX39R84jzgagFn7942vmcL+/AnqKDO+WaKubm5ATm57vgDvn2+kb8oiKr1iUgHBaSWl5ft6quvtosuumjg6Q733XefXXvttfaqV73Kjj32WPvKV75il112mb3iFa+wU089tXU/vd6BpTsUNGdDKvrxc1gmWgtHQbOzRIVh0EI+nSID499qrM4jAkrmUFTEljm8GuWJyiiQU9ExzwXX52ADx8LPMcwIg41sFxYavYoEcU65njLAEqBwG2ocOF8KvJgHdEZ8YZvHhCBhZgMy4qU+7j/iFdvlZ2lG5b1Nly/bVWarXo+BIMp0zOpehskyV+24nWMdBLso6In6Q97Z/2TXbHhuUQeia9dsj0zuT70u7uxzG/DdfLjDVvkjBdQqyK+hgwJSN954oz3wwAP2pje9aeD4zMyM3Xjjjfb+97/fnnjiCdu+fbtdcMEF9ru/+7sj9cO7U/BVF2aDSsAbHZQAUeAoUG6LwY3rs6H7d0lhVV88sT4Wrhu1h86wBFSjZlIRReCUOTDMQjkbxnEx8THlwCMnr+ZM/Y7K8xgy3qL6CvAUSOEcInir2zC8Hjtdl63LFR2bkiOOS7WF7akHPkfy8XM4tzjnkeNTc6t4aqPPDMI4JuYH62TzqYIEHjv3F12PinhWoK3mUwEv89LrHXhkXNM0/Rt78Tmifu8UrhipQI7bVgHAYQWps88+Wxrw9u3bh542sRLytNVTV99cwQasdp8og3TF8MlRhubCRYHz8oe3xQrCVONsvT4CYE0WUeuIVfaT8RQZHcuWr0eozSXcppd1Q8EyeMG47b0ZEYCxgauxRct7CvyRfIwMrtwWzyn+5msyDFQuE3x4qC9t4oNQ1YaebJ55XhUw8Xl06thnBOj4H5ekvE01dpQBXmNUcs9ASs2zAih1fRrbKNlYppN43nnFpTgGaw5YEIyy66xqTAiAKHsHKf/tT+vB5XLfio5tst9l+fD8el186ENGR8Sz+1jwSnkjcGLBYbvKqBnssH3vV50fdXxI7jBrI5Cs3Yi/NtFnVj5yFgqgojlRDiJzdm14HrUeExtfFESUHHWpD/VBB+VLdrgby4l3ykVgVQpYeK5Uhor1oiBGjc8JAz6uxwFFSW+wfkmvlUzazEUEVLXEfiwCaOYlkkfk43gc0fwjP/iWcO+Tr2X6eeY3st02fmysQconE5f9zIYviPM2SiaOBP03v7jObDiCdh7w2hRfSPTj/N3GgLBP5Tzwt5fj5UylxDXRHjqKjFfMAHCMKjLF9vkTBQGq72hJpFamfD0B+eblFx6PGotyqrw1N3JqGWDwdRDWa86kMErFR4OxM4521qkAA697ef1sPOr6bBbcZbwqYES5sV2hvmXL+KwT2TWbKDCIwDojFbihvuG4WW+QF75Rl4mzQZSvGjOv2ExMTPS3m+PmM3y26dzc3MBuP+Qbf/Oxp0QmZTY4wXjfE2859gmNlgdwbZWVxmz4UUn4zb/ZMbFRsJPg6BHLOHm7bGTs3FARsG10wBngeRuq7TaknAWOmxWYnY+SGfOjwJB5iMaGgQUu0WLb0ZKlirQVzzgeBio+r/TDy6hVAdQz9TQQdADopDIgYsDCusiDipgj4j6UfNRDf5mPSK7ON/LPc4YZgAoSI76REKB5i7falFEDVCwjBKkInFgGKDNuD+XodVwvUFbo99Q8ID/4VmkPkPwZqngtTY1fJQ81NNYgxYbGy2HsJPimQzMbckRsJCxwPK8oiiozB4oOWEXTWJ+dYckwFJ/K4bcxqhpi5xQ5dv/OHF8kO+Vs1Q4x5eCUDNiwFTix8
0SKHEn0UXWi9lBG7njVUp/aKBLJjGUVjY/5ZufMbWe/cVyqPQTDbOMAyy0aS/SJbEUFO8qH8KeG2HdEczIxMTHgwBVIMV/YTjQeBAYOWPw/2qHz4wE8lkW982v4TdMM7OaOeOXgsERjDVLRQ0WRONLk5R1MrfGmNAYrNnylyFgXJ0U5aPwfRR5YBh1r5OT8t8riuF4EUMqBMzC2pcihM0+lNtiJMpAoJ8Ny4v78cTA+V7iEzLqllv0isGGjZAfnZXhJLHKQfh4zfhV01RLKKboWYjaYQShQYcDjeeG+eC5cHpHtMkVBTAQAmW2xPBQxQKmsNQuoavpUgVCNrTtPfpxBXYGUzyWvGnGQjPOK95DyA2j9Td38Fuwam39KgJTareLEzkg5KTY0jrwzoSsnrvrPIlpVj0kBW+Tc/Rw6Pr4upZaOsrGVjtUQgyWn/aq886vAibeoKwOPwFkFICgfFdUq41X6wvoWfWpkp0DP5w+DJuQJl3c5osbxogzVw0WRfwQkpW8q41QBQ2QfZoOPAlLjjmSrZKWCEq5Xo7esL5ztlXSYiWXLAF4bZKm2uEw091lg7WUioOPAA/nhSwqrTWMNUvj8qdIShRM6L7Phh5/iJChDUwrEETUqNjoWBiYGrIw4GsoiLBxPBGoRwKnzGZC2ASo3bHbuWEbx5cbB98UpA8eARYEi8sKPgDEbfnaayp6U8y05NTyuIsgs+jQbfIUFOjnnGbcO45KOkiGWV1kUL2UpZ4Ygx4COqxOR7nAQhW2wHjJQsj5HThLlra4jRbqr7Ca69sdBjwKKiKIMHflHWWVBBPoZLhvprvKN6hv59Bt8+Rql6x3fPqL44N8lGmuQwnsKokmuiUDwGEeiaOCRUqvjDFQMggxQtUCgMgRUusxpmg0vf6jyKuKKorDofwZEXIZ/8xIpAxS+Y4c/uOzgxhTJDHc6+fEoI1D6xQGPCh7YmbAMlB6yvNiZs8x83Di3fG0Hy+ADRdUKxPLy4MNJ/TjKmC+UM1gxfzwelLfzHIEU3qeDOqLkrD5ZFoT8oW5wQJNd9+M6PD88535OBQlsZ1EGq4JW1GMOULA/bpvL+nj4qRIe2LF83I4iUnZa8nlIYw1SHIUgRY4jKoeOkV9xrqJnVsDMECNHVIo225IyCgYnzma4PCtQBlqRcWY8oUFhmcjI2cA8msM5j0AKCZ08OpaonWjemTLgyUCL62ekAhIzk2AQZVoM9OrRU+zM+TmACuyyF1GWxuKf6FFKLC+1lFkDUCozZLmzznP7eG1OLfVhHRVIKB/E8irJrHQuK5fVdbnicjHKhAHVdYFXoWr7buvvxhqk+OI2CpffpxKBBUcF+K0mJ3ptAhuSAgRUApWW11DGPx/nKMh5YGPEKMyjIgXqCsii6LHWuUeOSY0nAqmondWmCHDQuZf6Z8BhAy9F39wWH0OgclLLStgvj8XswCPHvE20A89k8cGw0TVh1Q+PDceOr91B/l03/eM7yvDpI37M+/Pxlp4KntlgBHARqNUSyhPlyLJjYuCNdNI/+LDsXm/4uYM8R2izKC/MqpVd+29eCuSxIOg9ZTZOKCXLIisnlSngZHNaymClSCmL4gfL1pIyah4Ll+eoG6OmCGQ5k1LtKYBihVWZSO2YuX2VUTHY1gAi/o7mqpa/mrktARXXz+SUzQuXY9CIshy+VqacidoY4UGCupaCbaAjVU9YVwCbZRPYFzpgHI8Tjj8CypKu8PFIV6JAMWoH63Emj32VeMh0GecVg+NStqp0t7S8qcA1s4FIlyMae5CKjMQnSK0hs0PNHBVnBr4FmKMynDilINiXRxElB54ZUckoWBHYkCKDXUl06PUVYJXaVI5btYvOEeu2abNtoKCMnx0xR4XsjCJDxovdJcMtZVI4l3gBO7tei9davD2M7vGaL/6OlqlwmRBvLMZlMhXAKGft35hdTU1NDdyQyuNAPccMt/Qakoh4rjJfwZQFFBxwKX1me2V+mA/mFQHdQT7KeM0OrPSwPkY67uNQvk/xroLKGhprkGLj58gte0cOTqDXVxERR6ZoZDhpOEm8ruvfTdMMPdkCy3B7+O2klJXrcPmSgmdOOCPmP8qg2lAk/6i9CGxrDFmNszR2n1+n7EJ8NLZI3hjMMNCzbEr6io4gAih0vCjj6enp/q5ZzpoYTLgt3HSBj2hCOWF2ZmZDkT6Pift0wPRj/iBUXmrPxoryYieLsi5dy1JZQSkjxDKs2wqgnBj8M91Xvk2BDIKRArkacGTdQv8XUY1/cRprkHJi4+UJjNb8ceIyg8ffWXbDUZyZDWRS2C5+ZxNfcsyKZ8VbBE7qXE27eLwko1rnHbUdAVU2ptK4VLkSqXb4epTSoxqeMIJVbURt47nouyQzBgC8tcPBIHrkDQMUPiMTdwhGQO42iEFgND7cKILlvU62c02NWf1GHnBckX0yQPH5SOY4DmUvkc5kgMkU+aUo6Ilsn8+rPhTPEU/KBjM6YkDKP7hZAp/Q68QOj0HElS4ydt7mG00oOi+1lVntQsPoDw1SRVuYMeLYmA/8jo6jk40AShmjH+ddZooXrqcIjUpdC3EZqF1FCiTYuPkYf2qiP24PnQbLKTN4rM8Xt0u84PiizCmqo+TluugbIqanp8P7xHBcy8vL/bcf+zdmTbg70IkznsgG8T8CJfKM/PlSHztxz7KUXFhX8BgDQmTrqs2MVNCFy3CoR/zBTSO8ISwK5Hj+sQ9VJtPZiKJgIALfrB9FYw9SUZSBSqWAhwGHIw012ZkSID/Rf+VMUDkjsIxAyh0Btq/6RgNUx5WSMXHkqahkHFyG+1IZWZZF4Vyj0y2NLTKWGqNhnlH3MgfBdUq8saxUm9mcMKBEoMzZE//OAAqfwu4ghfc0OXCwXJhXdT2Pl+lxadDb8HYQ8P0YP4Iq0qHMkTKgR/xH56J5znQ6CrJQz9U18VriPpQslE2q45EtlPhqy/dYgxRf5OMJVBOpohc2IHW9yP9nO2WwPP7G8nhxl0FSGQNnFTh2fm9QW5Dw8yUnnTnCNhF/1Acfi7Io1W70/LSaMbUxFnTsWJcjdxVQqD4z8FDOyr8zJxvNhXK8OKd+U/zMzMwQSHE7PmYEp4WFBXvyySf7UT63Hz1RIZIPAiCC1PLy8sATZlAvnFe8boNAqcbCQSPLWvkU5pkDK9WOGivbtgoqsB4DVFugwvmIfIL3o+SlynL7bQOtWhprkMI7nVEoGB2qR99wVoVgxcJFw8RvN77a9WHVNx+PFEMZFI4x67vkxKJoL2qHvyNSDrYNcbSpdpT5HCBwRJlUBhalcXAggWNHh6Ei5CzazCLPGqBC2UR8IzB5UIef6enp/vIeZ08+PgwGcInPQQqX11VQ5/24vuJ5dppLS0syGMNv3zjhYOU2yfcaoV5wEOGUgQtf266x82jOeN6w71L2hecycOJx1hLrNe7yY36j+tg/XuLAcarfNTTWIIXpvJm+fqRAQQnMyyqFjBTH+8DrHW2JDSaKvLgO9q2iI9U+/2dHmLWD/ap2o8hxFHDCvjKnwkaVyVLxGUWUXF4Bhv9HkIrA
Iuov0rUIoJhXlk8E0AhSrjcOSLyTjwFWbYhAkEKAQufJIIXPDCyRyxJ1Gz8YmGB7uCSoglOUgQKGaM4y+4+o7dyV6qigI7OtCKh4ftR/1mVlS6wnyKPiwfUOj9X6hrEGKV9awOgPjcKXB9RyEG9mcGqawWeFcTSFbeAE1KbfkZNlUsCn6vpj9HmpEOtEdaPNBNgvK7ECfacIoKK2FJ/YR+nDF+AVP+yk2xCDkQID3g7MuqWcXORgSk4nkhP+xr4wC0IHPzk5abOzszY1NdXPohykzAY3VzgI+bIebpLA9wipLMp/T01N9fvD1z7w9SIzG9oQYGYDmzLcvv1hpmj/busoB97Ege1GAZeyi2i5L9I9ro/yUH3WAJTK7EpUm62oa/Ec7PtYo2AagxqUi9rB6MdraKxBCpdZahQClTOKZrA8RslRfaY2EQ5OLvJaA3RMvDWYy0bg2KYv1QaOI/rPx6I+2RCiecrkj7yhEyxFlhEhUGFb2TgVOLd1LhG/mVywLwXMHMSpazten2/Ezd6lFAUy3h8/vqxGBl4W+Uc79A1Dvd7g8wqRF+yPAwaWNf8vfSJiX8RlMShGmUcBTbTkqNrNAKkEVpFtKH/EfpCX4ZF/Lj8KHTEgZWZSEVkRkNRyIDtT3EHn59DovB3fwFATrUfKxs6Qx4CkxlwCaR6rquufUp9YH9tRO7Wy/6VxK56dSptYGIx5bkvBhBoj88B1er3egE5yfxnAZbrIMoh0l1cAOEL2jMk/uKnB63g2g5kU3/PES+28PIT98Qf5ipwvroygDft1aM+ulpeXB7bM4zzgNTC1MpEBVg1AoV5g3z4+DhCigIt5YWDizTmZnSq9U4GlKh8BVXY5A+dIbQLhbDkLbiMaa5BCZTezAUOLnAxGWBjVsJK4YfK9VmyAXhfXydWacRSt+RKGirCYMgWvASqur67jZCBbit7RwFU7kVJGoKj6VsczZ47t10Ry7ACitjMevB38jq4jcDRaosipcD+46xOXw2ZmZgY2SyC4uNPH5TV8aKvKnhRvancm3tfkesdv++W21XZzLNvr9frgidfTUKa4hIkPo8VvlqH/rg3gGGyzuVPb+rlP5xdljp9Ih6JgNJon7FfVrQmq3Adyds6BhZdpo+tOYw9SqOQcMSugiiIaFUGhkjjhxHh9ZVil61OKT+UYsV/1m9uscfb4W8mI28SxKgNgMOB+sog167vEfzQm/t3GIBSP0Zxkdfh/jcErmfHxLCJlh68ctlrmU/rOO/aia7PebxQMMQ+RM40CLP5ghuK8u82pe7t4ORN5joJIBArmra0elPyO4ol9UHQtqmQLEaDiNwdwUeCjiOc1yu5RZrgkWyvLsQYpBybcis4RB0c/maLgGjzuXOJlDe/XJ8ZJOdts+Yt3MbGiZMrNmz3UjijutwQcPI6DRSXe2hADLZ8blT9sU2WcNW1gW1EkXIp6kSK9jSJ+swNv7fUsCpf5XHao83xzLm84iBx2Nge87d1s+N1QkRNWIINl8L5DDyDxZaiYubmDxJURtM8soOBjUTCHv1XbyplzXbWsyjcn1xLrmtnw0mypnvIF6P/wLc18SwjqPGbVLJ+MxhqkXAg44RwFMsAoh4+GisaKShIpFEdKvDattkfzGKJMJAIm5ajQUNUaMh7HvpHX7FgpAnTZ47z4MeWAlPHw/8ghRnwwyOMuo8jJKWerolGep6gtlkmb6DviAcesdBcdAuokb5JQTiR6R1PJIeIcI48Ijn4s45/Hh+0owMJgDMfvdsv9s834J9L57Jv1INKHbC6zgCSaS9Umt6VAE8fJL8OMgh31zRTZHssXx8Tz1ev1hh5ZF9FYgxQvabhheRbkEYiKXpBcyf3GRAYpFCwrktlgVGE2qLxuVKpP9dvr+0fd/KgclX+XnK9SjBqwKkX7Ud1syaQ2I4kcFo7LiX9nwYHiVzkw3FhQAuoS4NeMl8eQBVeoi5ilMUDxRgm8doXbzN1mosCAo2QcJ/NXesM1jq9kYzgmr8dlEKT45YnYj9JJHAf/zsCA54Lr8jnVBpeNrmlHehSBZgauDFY8l1Ggi/xgffVhvcTnK6qgOaIjAqScXCh8wReXC9DovQ2/WDw/P98HKr5QjIakQMrb5rvevQ/8j/z6NysuptCZkfA2XW8PlZ3rqgiKHWoJpDKDzNpVFLWPWSzyhUaAUVsG4FG/EUCxgUdAncmOjykZZDxgcBNdV1FLQRjg4GaJ2dnZfl3Ue9d5v+8Q5cYZDgdgPC8IJDhW3oLOETeDCDs4P8+ZIOr60tLSwKtwvF3kAft2R1kTQKAMVPksSFFBakmXIhmzXBVwqP9qHjHbRF+l+CgR+lW+7uT6ybb5lAApFqZay40mCtvAdV91MyHWx36V40TnqY5lE851oou+qh7zxePnaCqSI9bJACYCKT4WtcEOKmqbx6L6y+QTAXLJCJXcSs5JHVf8qLmIdI1/Yz0lJ75WwBslGNj43ifUXcUH84J9o/NR1z2yucB55o+TyqLcCZrZQKQe6UyU0Sn+svmIdJyPcRtKBjgelIsi7iOyndJ/tj0ck7IPtoMIKGsCuacUSDkpJVeC5AyqaQ4sd0TPIDMbfAMwR4rsHLwv7JMnCBWE+eKIRLWrnBNf+I4UKdrmrq4dZcaSzUXpN8oia4cdkMtjcXFxIMNUTpCdnBoTBhhtx4ljqnEuzh/rhPOM44scKQYS/s0g7uCET3jAJT7Xj/n5+YElbj9ndmCpjAOkiCe2N5RrBAiot/g0C34XlY+p1+sNvOgQ+/ExoE6pZW2WvwJTDvjUPPM4orl2fth/8Gt6Mr3JggTuP7Ip9hs+bnxKfRQEKnApBc1R/whKLv8aqnsuBdBtt91m5513nm3bts16vZ5dd911A+ebprF3vetdduKJJ9pRRx1lZ511ln3zm98cKPO9733PLrzwQtuwYYNt2rTJ3vzmN9vjjz/elpV+fzWOlVNcNBIVTSpDYyei+suitOijzjNQ8Yd5wLGod2rxNbYIlKJoVgUAkdxHiQqzeVU8qHt2uM82YDuqATK/0biiefbyKvPn9rEfBiheFsMbdnGrOYKUf/AaLut9pnsKQDkA43GzvByQFhYWbG5urv9x3nwJ0kFqenraZmZmbHZ21tatW2fr1q3rL2fiONEmUDaR/fEY1ZPboyCx5HOyoCOyFaUvka4pW1EAGfVdspFMr0sUzTlnyRm1BqknnnjCTjvtNLvqqqvk+fe97332gQ98wD784Q/bHXfcYcccc4ydc8459uSTT/bLXHjhhfa1r33NbrjhBvvMZz5jt912m73lLW9py4qcqGyZj7fAokNH58eTgXUxCspAiusrPhCM8Lz6r5yEUwmgMqBB/hQgZIAUKXdmzG3mNeJJbWpR8x+BWDRXKwUq1Qf2g+2zQ0X+s7aVoePxXm/4qRIKpBAMMJtCnVCBktJv5E3JUUX/OAYHqfn5eQlUPi4EKP+sW7du4CG5Pk7UFdzlqzJVtDFcGo1e+KjmgnU8ar8EVJHeZDoZ2WMGUl5P8R7NGc9zxEdGvNRcQ62X+3bt2mW7du0KmXz
/+99vv/u7v2s/93M/Z2Zmf/qnf2onnHCCXXfddfb617/evvGNb9j1119vX/rSl+ylL32pmZl98IMftFe96lX2h3/4h7Zt27ZqXiJB4jmlHGgc+EEHwYqF911Ek+31ouVGN7aoD2xLAZ0CE1yuxCVLrqPa44wMx5NFTxEIM9VGall7XseV2p1w0zRDzpPr4I2o2RIwLoGqTEFdX8nGkY0zkjc6UXb2XC5b1vZrUPjgWJQfggE/TYU3arBsnAfWRdZZjOJLzwdE3XXeEHCxHQcjvr7GT87wtl0ubLcRSCkdUs65RqdVPzyXka23JbZxlw8GKdEqDI8Rl8dZFsy/8+51eEclEwYO/r+GVvWa1P3332/79u2zs846q39s48aNtmPHDtu7d6+9/vWvt71799qmTZv6AGVmdtZZZ9nExITdcccd9trXvnaoXY+snPbv329msdFH2YgTR1pRZqGyGJwINyRWOIxIsYyKMDPlVTwp/tVyHsqC+2JHzLueuI0swuJj7IhUlhDxl/WB7UTXG7Cc2eA1GwVQOH4+xnrD/OG88rGI2Ngjx1fS3Qj8o6U5FZSh3mD70TWaklNXY4yWy7wcZ8fq6S5od/zYHQYzpW+u25FcI2dd+1HzGulAZOtKdiWKxmJ24NmifE0yChZcNi5792NKX5n3SIdLVFvObJVBat++fWZmdsIJJwwcP+GEE/rn9u3bZ1u2bBlkYmrKNm/e3C/DtGfPHrviiiuGjkeGYjYcFaISc/aB66NoWGgcPtnT09NDQGJmQwDVNspCpVCbN5xYOTB15iyK++JlDFRafPDnKAbI51FG3iYGBFiWwVP1w+C0tLTUv5COc4088/ww+Hq9mZmZIYfHfKlxMijxscgBoYzdkXJErYDA28fgCs+pm3bNDrz+ApfR+GkqXlZdz0B5RTLCOUXbUZlutDyNmx/cybrN+VIf27L3zfrmvOBz4/yYjwOzMecrai+yichGS3aC/aogRJHLNrr/DJ+PiHKL3maMKw04B2pcauzot1xOLucoIIv8SkZjsbtv9+7ddvnll/f/79+/37Zv3z5kyBhROvF5NHKM3tD4cNIVSCGAsCOsiSpUX04MciqrUREjO2F2Iug8GLQw2sTfikrRs3J2qMjK8XK7bLTeHp7HcUbRYSY3BCA13iibycpGzomPM0Cp9qJsRV07VRkU2gBe9+HlbZwTXmrkACDTW9R71i8lP9ZbtkN0tHh9zdvCtw4wmKDzV/PLY/AgAfnittm5lgLQWiccgQH/V3JH3UeAwsdgzczMyLcuY9DIoI4yw+AJbRDP1WReSGplI6NVBamtW7eamdlDDz1kJ554Yv/4Qw89ZC9+8Yv7ZR5++OGBeouLi/a9732vX5/JL5Iy1Tg7dJKozHxPlHLmuLzAby/1CXYFZ3DJlDmKsiLjjaJGPMbruzwm7l+BFLdfwzOPEftkkMC5YGNTBshywb4wGubrIVlwoMaUORUFqlnZ6JwKLhS/LBPVBl90RgfFc4oBGd9mgdFvCSzNDqwWRBfiuZ4CTB4HfzB44FeK4Ft4cb4VwGBwg9G9AifkS9myWqYukQpYeM7bEtsJyhgDagd2Bym+NuWETxhxP8PZMttslG2ijx1lbBmtKkidfPLJtnXrVrvpppv6oLR//36744477K1vfauZme3cudMeeeQRu+uuu+z00083M7Obb77ZlpeXbceOHa36QwU0G7xwx9GbmQ3dg+HluI5vZ8VUGSMSNFS+EKgUUQFWjTPiT7QsgfV4TGrJy69DLC4uDkXLijKA4nFhXyowqAU/bMf/YxToso+cIPOV8Rzxk80LBwWqX5wfdsZO7mBQv3CXGrbNbeCGAl8Oc2fkIOTbzdUTVbLARwUO6MDQMSqnhufxOGYn0WqGA5Tv5PMt5ugI1c7caJ6j8TmPaMMMejW6pPos2Qy3WQqE0E4VOOFbll0P8JUsCkQUqOBcedCj5Gc2eP9oRugvszFG1BqkHn/8cbv33nv7/++//367++67bfPmzXbSSSfZ29/+dvv93/99e+5zn2snn3yyvfOd77Rt27bZa17zGjMze8ELXmDnnnuuXXzxxfbhD3/YFhYW7NJLL7XXv/71rXb2mQ1e4Isicy+nMhM1OTj5HMXhxgkUMkYT/j9TBlbiLHvIPl6OSZ3jaIflprKt7LcCpohvZfQsd6XokXNhPlieJeWPgDLKPJmXbLkimnO1JOt9cdbO8lAyxDZYVzF4yu6VY11Q8oiy3ZoPb9BBuSh54Byr5w2yo+VlwlJQUgJh5IHnchRSYMU+QNliZiMOVOij+PrT7Ozs0G5I1CGz+MHP3B/yrXQ3CuT8PBNnZjXUGqTuvPNOO+OMM/r//VrRRRddZNdcc4399m//tj3xxBP2lre8xR555BH7qZ/6Kbv++utt3bp1/Tof+9jH7NJLL7UzzzzTJiYm7IILLrAPfOADbVmRyM6T6YTLe9mzznCZAVNlz64wg/K6ZsOTjvxl162c2KjVONuu5WI53lChjJR/R8e4/QiQI2CqaZcdmY8Bo0nl2NtEaN4OBzkRRc4V28F5ZnnweDAgwpcQqmtsnEU5z57xz8zM2Lp16wb0kbd34+s3sA01j2r+3dmpnXbsSPlxTGosfPM8ygTvicLrUU4KeHmu+DeCsuuPj9GzT7bXUQEK+1Y2gHKuddh46wUGz/6EkdnZ2f4zGnkDzcLCgpnlj45jX+pzG13ScP7xm9tRwQuOu4Zag9QrX/nKtPFer2fvfe977b3vfW9YZvPmzXbttde27XqIIoNSUUDkYLwc38QXRXK4ds9AiACGWRsqP/MURbJq84dy/KXokeXEkZuSHx/D71IfKlJDikAYx4e/cXnIj6PT8/MIXm1BCh17ZHQRUGA7al7UnHFQxA6dAYp3e6pMH5fevA47FtQrBgYGfZ5flAeCEW+O4OO8zMMgxYDLNyKj7Xl9XOpTuxTxgzczI9hGY8JyKwWo1SL2O2Y2EDxjQI1Le3gfnHoNi7qW5/2ZDeqFywWvhXJQlgXQCqAOWia1VgkNkHc2memLoig0NCreysvP2kJjROLrVOgw8XoSR3nOCz5LS4GtWh9uY0jRdTj+Vo66BFL4O+IvAzwlE5wvBxA0VjWeEo9INWPHMaibg7N+FGhFzlzNt9dDGXgbvOvUnTleH+UMgzMdBinmJRubl1f6iv3UBIxOke1xUIIgx5uflINV48LlQ/x/uCnTVQRXXO1BPfAyqDNN08g3O6hs3wllhtf6sQ7zG/kkFQy08VtHBEixgTmpaJzvL0EDVds4+Z4T7M/rmw1GEngPyPLy8sDDMznyQF6R2KmZHdgYogDXy7JcFKl+M+AqUaSwqv2sTZaNGiMClgcEJYeq+GPKAJkjTvwujQWJdQ035rCOeb/+cd1BPVUPkcUNE/w4oOnp6T7fvMkIl5A4i+cxIQBFmVSUlfFSn/Pn5X1Hml9jmZ6eHqjPzx588skn++1GtszAxMGBLyP7hx3x4SQGVtYdX9rDZVWfe94oYza8EclJ6TTKCoNpFShzwO5tZuOqle9YgxRHwCpVR0Eqp8
5RnzK6yLGqFNZs8NoYRi444byGno1RpckcOXJZlIGi0jnso6Z+BgzMN2eyPFfqoq7iCfvE5UA+p6K7jFSgo0AqokhuqB9ZFqV01/vkWyJ4OSy75sCZqB/Lsh8ei8o2s2yK24rm1T/qehcGLwhw/FgntF8EKZQpj8PlwfNwMAFK6UdNoMljw9/IM77Ecm5ubuiRb9gejpft0vuvmUPOoliHV0JjD1IcGZlpB6UusHJdvjaAbXp51T//9qgOryXgOf+unTzM3JhQCbhMluGodtT/zNmWjuE5BXoMPpylMBio+mg8vHwRyYANLqOSMWYUORy1rKWclpKJO3K8yRWdlHLiqB8q8mWA4nEz/xzdM8ihPbHTVzL0MU1MTAzc24PZoY+Nn7fpS+TcL+7GVUEF2xQHDCirg0E4dgVaHCQwAPPcmx3wKZ5BPfnkk/aDH/ygv2mCgxAMzJEnLxsFGmqFI1r18G+lR7U01iCldhihYNBgI6OLlgAwuvCykdPHsmi8fsxvQnSeej19UxwS8qQmFQ2JlRj7YqevnEVGJfCpJQYjHwOOR62V17SLv7OozvvJAEy1n7VXy5v3jdmCWl7j+fJjqq7XNztwvQwf+cU7+bg9lAf/dz5wWRttIXJ4JefGNoVOl5dA0Q55swQGeer6k5o/1DseM48Deayd39J5ZXPRZp1Su/z8SsycHaTwEVgoFwYllCXaKQZP0Vj4mAcSyCcmB+i3ammsQUotl6BCqmsJaPAq+lPRpCIFXvybox9frnHePaLlh3w6YZSjlAWNijd4uIJ42wqoamg1I0kGFLPh+7dWCgYRIDu1zQyjaHAUUvqGjhz74TFEWRgaPGZP6kK5kgPqCwYRDlBoL2osNZ9Ihgy8Kjs0G3xrAQedOI7MhtFRRsGDAq02lPmKTK/Ztjl4U2DP7bp8HKT4CSPcF/YXBTCoA2ocXB55w/9c5ykFUrxUoiYTd//gkkkEUkq5o6U2L4d1UAkwSsbopdc7sJWTn5WleMO+nLwf74MfJOkbNjyiRgfEkaUyjtUEJ2wTnSGDcAlE1Txz2+paDFKUdWf8ZmXakDtk9a4n7E8Zt/PMu7pclxyMsnuicHNBZDMeOHlQpZyLWhrDFQm1K0+BrtfBjRK+GQTr42YJdLy4KcTlGjl1jPDxWhXapwp6ce7ZPlCHMsJxK3+i2o/a8TbQlzVNM5A949zjW7pdrhyQcNvoj1gfs2BXnVfXRWvGijTWIBVFPCwcRnis7+WxDD/Hyn9jHWyDwYnLqazKlWVqamrAkTBfnGkgvzhe9T9zvhGvh5IU3yVSc63aqum7DdU6pKguOuXIGTpfav44cFFOOHuqBIOtchr+YaCP+FABnlrZ4HF5O25Tasu5ss3SA6GRR65bcw1YZYFZ2SxAVbLFY4oiG+ZAhcfSNM1AFs3+hPtl8K21HeZJ+Ss13/5RPrGGjgiQQlJGyzf7qfK+butK4NkI7qZiI1T8qGMYgXo7Dj69Xm9gaYaVz5+vlykUGqz/ZsNUwL1SWonjxmiuJqvJziknytGgn1vJONqOl525csacSZrZkC5w5okO3h0UbybIot1oDLjM5+27vqKTUQDlWZ0CGkWe/Xsf/OxBDMxwfLyjL1vyR52PQIqBn2WrglueF0UK/Ll9FaA4qadfRIQgrsApA95IH1iOSqZOeF0Nx54FXG1o7EFKpaau2NHEObHjd+VHYOKLxzgBmLZHkRIqtC8xuAKggflEZ88iiyYclQSvb6GBliLrWnmrsa2UVBQaEWaWGei4XPCYmUlQ4IiY21EGlukT94fOG8FpampqKCjhOVPZCjtjd+K+zKPG7e1HjgNlg9cy8VFULJcIpHjpHHn1fnx5zsyG3nvETs75UW+e5o1TLEt+7FIpO1IbQHh+FThFx7JPRKgHuFyXjSMqo3wX11E885iVbqpxsk9F31MKXCIae5CKBKOiqBqgUsf5Tm7ul52magcn0tf7sQyDE4NIBiy++QKBz8txNpVF0kouKOvVIpZVmwylxL+K6Jh3dp7My2pR5MzV9U+nLPqMnDFvlmBHpYKdKHDD1YcsG+XMgD8K4HksHrRFT5dAG+alzMzxo81HmyWiuVIfHnvUFpdVMiqBFIMCP+WE+yuNCXnJdC5rE3WAA6jMhrKAqA2NPUghcQSCF1gzxfK62Ab/N7OhycHddErxMmDhZURc3nHHg9/cLvKPy5UIUqr8WqEI4Et1MENiEIrAaKXjj7IsxRuexwxKvSWV66vo09tUThzByV/Fge05KbDhgM55xuU+10v1dAq3Az+H9yWpbfX+QYfpr9/gp2Z4WRwf7+xTYMJ2qhx7Nse1mU5NOyhDdfNtppMMzirYxn4i/nEc2b14zAuu6pjZkC/NwDILEkeV6ViDFJNKSaOo1L9xIlkBuF08b3ZgaS1T7ChTUMblGZaKQiOl9uOcmSnFcKWOsrWMaqPJtsTyqWkf69SAXAlQonI1dbAu6xg6q9J1Go46GWQw8zAbzHjwE7WPDk3pJOp2ZD8KhF2fOEMsyQoje1WHnbSyYRwTjqNk9zxmbjMLOtVYSmOryTJVIJp9sBz7M5armX6obtSeByEYDKp77pQf5bF5W8yfkl1EYw1SCkg41VfLH/5ds/7sv30Dg29jVVFCBAx43j/4WBufcL63RfGiiAEH++FIiyP2mvaxzGpmZcqBZFkj/me58ri5Te6r1A9TKRosZVL8dAl2ptGyNF6/cZDilQJ++kKk0/iIINYv5ZCUs2Kb4euyKpNSskS9VMufuMQX7X5FkMR6KpOKHKPKTErOMwIrlksEVKyX7GvU9eMosGTe8foggz/WUQCpznvQgsvJeA1c6YTLuzawKNFYg5RZvJMkS/c5FcdvdCDRhgPlsDPQ4p1D6Kx8c4bZD50IXqhWzjQjXqt3RVFG7e3WLoccbFKZSinSNBuWN29OOVTEAQ7ONb41VTmOyDGhk8ONBq6XvpEANxSwI0TeMEjxdtU1S3dKeJ4dlwIol7+69sFzwc7b6/Nyvb9iQi0xoRxVcIpLpqxf7ECRJwZ3xb+af/YjGUip9ti3KBCL6iBA8U5kLIeBaeTXcB48aGGZRvPofWbZbyngYxprkIoihEjwWE9FPa5gvi7vWVO0+UBFd/jbjRgVH6PZTAFrJ5CJHbty/uzYD7VDjyjioWSk/D+TXXYumwt0dqV2sB6CDINHpq9c3z94IZ0zDXcqat5Z59lZlGwoyx488ImW+xioVCDnhM6wlEWx/Sugj3iuIZVhKIpkm324j1I/NX2zH2MQV/2Ugm8sh20ocOJ+VR3mu4bGGqSQVDTKpCbUL/h6pIt32qtMCpfj+F4G7pcvQDoAer2pqSmbm5sbeFoAb5RoCyBq4iOlx2gYDT2qcziolg80MFyOcGoD+hHwoaPM+ELnGRkxts1ZPwKbg5t/4xOu/fE3eFFbAQ+Py/Xd+0MwUJE32oMCJpQLR9NYN5OZ8+IZlL+Kg58ugWND/qO+lK37B8epgkyUWS1AKR3xoBfLc9sRcESEKzH8LEfPuKP21Lyq/rJAB/vzt5ZjIKZ86Ki+ZaxBi
idcTW4maHQeuO6PBspRGkZ4PhlskEhoBByR8GNLIkOuBaosyozaQIfDTgfHcKhp1D65Xk3EVhvNlnhTzg11Db9VvypbUMtEqIO864qv0UR8utNEfXc9xeg7cpioN15eAQB/R87QbYLHhtvq1Tzydb1aOXJbXIfPRXoV6VcpiEFZZ/at+vVvdb1TbePH3zUfJ2+HgxMHfFzKjvSUA7BaEEY6YkDKbDhCwAnisqy4GI1ge/zBjAczI3zybxQhIXl9vqCrDK0tQPEx51vJAPvgcSLfbTO6w0XIYxQdc2DDdSNHqurjMeVIVVCkeObon4Mo/900TT+Tip507u2p6x/cNgdPUdDHQGY2/GoYBgHlkLgNs+FX2yBA4bZ6BSxo8/gkCjVWBVQlZ6lsmAMPLu/fqo8ImPC/khmPC32WZzP8LFP3MdgOX1tSq0XehwInvHUGnyPJ1/g5mFJZ7lMCpPBeouhmWDN9DQaJlV0RtolLHX6Oo5foQi/zpCiL0FR7CqCisdcQR8mRwUTtHS4wi/qtlXvWRk3ffKEcl+pK96mg80aDR73CgEjduxKBqwJQdJS4td3MhvrMbAp/Z1mIclAMZGbDW+txqQzbxLbVCoSPj+XO9snLlbW2h3woIPJvXplg21K8Ydvcntf3e+78QcPRhpXa5VbUBwZh1GHPmvit0jxHDIZRsFJDRwRIcVpZmmgnJbQakPJ2OEpGR8COITIA5TwUZRF6qU4EzFEd5ikj1a7KNGpppQAXOWWzYR1Qjq2N8UTE0bv/5z6wH+XIeOnEo9toY0EUoSqZ4H8OuiIwzdryb2VfGW9MCmzRcfrv7HoK2wXXVXxlVLIJHhvzq/jij9IFBj4E3uihvNhWDTCo4Jn1D/vx63rqzdAckESfNrI3G3OQWlhYGLiYzGklUxTp4WYGviFSTTBHJTixeB6jC754zm0izwq4OErD9pjPaMxZpIvn2ZFxeR53yakdakJn5HwqqgXuWuKoEy/URzyg8WJ9rufXLvlVHJmTzgjLM4Ci7fDSEG7xx/7wG9tQTikDU3yqBDp71kll51hOXXtCsMe2kEqBDfejAMHnUW2YwGVbpW/RcV5q80xmdnZ2AJwYJKIH1WJwgvz58V6vJwEJecDrUzh3y8sHnjmI8+nH2m6gGGuQ4uW+mqgtivR4BxMSRzltHLCDFTogBikGQOVg2EgUSKnrSJmzYsNCp8B9K14ZnCN5HE6g4t/K6bDx8rE2xFmUAqkskmRnhuXd6eA3z3nk3HmspTlkp6ayhIiwDY6eEVQUSPG1OZYNt8Wgg2PKMpWaOVCUyTMC4qgfJiVv5IevQ/Eysrouid9suxFAoW/B/jiA4uVFleGra2BtA9gjBqSiJT9WVicUlBuOigiZlGHVOOcIpMwOKBc7VcUvKxRnUqUMMjJWLsd9oUNAOSlnWpJZWyplBQezvawszyPKSy3DYFn/rXSEHQAauLo3apRxKSfo84y8scNDYmCLbEOBFfMR9aPGFAVMKMPI5tm2Mt1UAU3EN4/Ls6gIeCJSQIe2jhu88KnxOG9qiY3b5nGxT0Fw4ucysow5iMJdy+o2HcVLRmMNUmxEkYJHxuv1MItQ7eP/iAeV0jo/uETCioAAEPXJ/bIzM7OBnU1RtMRApqI/Xp4wG35ihnLKaj78GEeyB5tWAmqlLNYsdjYMUOpRSF6f51VlXy5r3DDBAFVyskr2maPC61ORTah6kfOJHCa2hQ4MlzCRH7apyM4j8FEghnWia9EZQKl+uFzkvKNNBVFg65sWeLOEy56vVWa6UQIKtmEGIiXDpmnS3aZKrrV2esSAFB9j4ClFQ22+mVhBUGm8b3zMDPKEAFZaavRyKjtUk64AIlLQCEwYrKLolduJ5B45yOxYzbmassrpcp2oDGe5/luVU1uda8ACAwGOUqOnYasgJJNDjcOI9Cpy/JGj5Qg6CvpUOewjA/eS/mMbaox8LJJB5gsUcOM59AceTGbghDakAtvo6SWZfqjvDKDwoy4jsG8t6WgU3NTQWIOUUy1i8wRxBOXno3VTFjY6Eb/pkG/OxfaQF7+vwVNpjjRLE4jK631k0QkrbGZsTTMYSZeWdVimtcHBoSQGYZw7pAyo8LwitZafORNVF9f9nVyv+OZW1CufM+cxcrLOg//nHX2oV6yzKEcvw/ajHJxahsfyfI2NHZpywDgfikcklA3XYTljvyyXkoNnQn/iY/RnEapyPC6fA98o4Z/Z2dmhepht+z2bGT8K6HFsuMLkdTzYVkGMg5TaqKFsrA2NNUjhZgm15mkWO0mVDpeceFZWLUPgLpnogiO2UQISdKzRxKtoMnJQGbBH5TNZZBkVHlP9ZUbfVqkVbyXiTCnSn0heCOgqi+I2VIQZleVlHOXs2bFGW5KzseM4sM1Mx5FH5FWBlNKlSAdrjmXApMrhBiZlhyg3P8/XlljWmf1ge2YHwIrrRrrgfWXbvbMgAPnJMlouy0CFsuMP6yGOh4+NSmMNUhxRlibAbDDi52P+G48jZYDHCoKTys/Wynb4ZWNgBxoRZwm1yqOck9nwZowSWGFbKtrOHL+SeVsFVxlTDa8MVDX9YHmMzmsCEo7cFajj9YzsadTMU5YBR5lCRsrWkJBXLsv8qqCJ21TXTXnZiSmyV5ybKFjEzAXBiTc/8P+I1IYZlE/Es9fFMfBLIRVIYcDOMkC58/xFQIMAxdcIo4AqCkSQ8Hx0HZBprEHK30artp8rQiePBuTnFICZDa9Pc5v88cnEewr4lQ3Z40MiR8D9Zo4qitJK7XL7PG527hmx81e81fDRNpNSQIvH1XxGwIk6k/GB4MQbJtjYeU6U0bMD4idMeD0cE7bF4Mhy4YgYx4FtMx8elUfb40vXzpgfBXboxFjPs3ZYNxGoHaA4I+FlTQ4w/JqxmjvUDeQtkq0v20a2pDIw3tGHy/veHgYxJZtSvEc26XrGgJtlVCWfNQpItXv7lJnddtttdt5559m2bdus1+vZdddd1z+3sLBg73jHO+yUU06xY445xrZt22b/6T/9J/vOd74z0MaznvWsoUFeeeWVbVkpXiRk4uwFjU89jiW7CBgZnlL0KLJGHkoGjfzjGGucgOI5OqbkF7VTKxPmXwFDVrdNH214Rr7UJ+I1cuz+W0WbmdxVm1w2uybDvzNe24wZeWB7YH3lJT4+VkOqX6WTo+hiabzRMZZrm3Z4/jM58rU4DnhKmVmtrDO7byvbNv4j4qGGWmdSTzzxhJ122mn2pje9yc4///yBc9///vfty1/+sr3zne+00047zf7f//t/9l/+y3+xf//v/73deeedA2Xf+9732sUXX9z/v379+rasDGVQJQfPUY3aKsmK4r+bphk6hoTgwxfAOXNqmmZgy3i2tZh31CG5ovpvRUouUVtRppC13ea4mb5uVaqzGuR9cT88ZgUUJeLABP9zW+ycIodmpp8IXmvgCkija2Vq3tGp+s7UhYUFa5oDmxAmJycH3taqgCqSLY6dL9Ljd7RagMQ2g3aKslDBItos2rjzxOe9PeUv8LfiI5o3lAPeh8Q37iqAQvlnhH2j
z/C+FF8uA//mcgzA/jvrv6YsU2uQ2rVrl+3atUue27hxo91www0Dx/74j//YXv7yl9sDDzxgJ510Uv/4+vXrbevWrW27HyBe2y0BVCnCVIiPAIXHsB10SOykvJ/M4DhzY57ZALA9Xz6IlKCNU4vkElFt5KT6qSlb03ct4TyyY4nGy05V/cY5ZseH5XH+1ZwooMoyl9oxY3t8DSYboxNfN1OAws4aZaF44rrKPktjxfO8dB/ZO9skA1npehPbIo5H+QXnUwVHOA4OhhUQqDHzJ5Ij1mWAYh5LgVwpsFR+knng3yU66NekHn30Uev1erZp06aB41deeaX93u/9np100kn2C7/wC3bZZZcNvCYDaW5uzubm5vr/9+/fb2Yawdlg/LsUOSpSURK3q9awua8sulTGxhFhZgD8eoJRAIrbLZ1nh819KCVX/GP5zJBXg1TAweeYZz6e6UOkY0pWamkmc9Q112YyoOWMTlE0B+7U8CZUzipUf9lcRgCXlfW2WZ68ZKaie54fFRCUfAPrA4NTCVyy8UVzyn37eV7mw6wc+2b5qv/RGJVNIM9q3lW5qH52numggtSTTz5p73jHO+wNb3iDbdiwoX/813/91+0lL3mJbd682b7whS/Y7t277cEHH7Q/+qM/ku3s2bPHrrjiiqHjtQ5YRY+ljCGLKvwcApNviJienjYzfRe9L+tlEQjyh785MmGFiyKolVLmwLFMFiE5b7iUgQ44i+IOJqlo0vllfiIDxqg8u9HSv5XD4MwDZcMAFWXjmdyjTRQquGC+3SGaHVjGbprGpqenrWmaoYfh+jyX5M5jwmwiitTRFtR1L1yW4jo8R8peFHixQ621xxIp+1FL+zgWHyu/roX1kdtR84z91QRtGZUCUEW1mybMDiJILSws2H/8j//RmqaxD33oQwPnLr/88v7vU0891WZmZuxXfuVXbM+ePf0b1ZB27949UGf//v22ffv2VkqBysXHM8RnZ8S/VR3/ZmejrkV4e/itjkWTnzmZlRIaJrevosfMUfJnFDCqHVsU0fJ/HldJfsw3O0AV/GQAhTxksqlxgpGjiHhS/9kRK2dtNhjJtwn8WCaKR742hEDOdUp8R4EfU2Zbbr/qHB7LfEitDPAaEGasPk7c6KU20ZTGxDqk9EDVjfhX5ZT/i/xCrQ84KCDlAPWtb33Lbr755oEsStGOHTtscXHR/vEf/9Ge97znDZ2fnZ2V4DUKtTEmLu+Goy684sNuzQ5EoX6Mrzmp1zF4PXaakdMqGY/Z4AXfGlBX591gMmBW9VBemSNXlAG3Kpfxn9Xneqq+MjD/xgvcarOE188yIZQNgwFnChmpQKokawQi3jCggIoDrtr7hjKZoG3hBgGXJz7FQAUFzgNek+LMLJNFNOc4Xi6rbDKjKHDxbwz6Jicn+34EHyCtXvLKsmI9qt35pwCfz2V1nXhJOgOpWh+86iDlAPXNb37TPve5z9mxxx5brHP33XfbxMSEbdmyZcX91zjhGmfNxEbEBsyKwM4F/3tb2evq1bIgTn60I8p55W+O/HhsfhyNBc+VgAr5N9MPTM2yBeSFHS3yofhWIB/VifjO/md8ZjvGkJ8osmSnO8oGiYw//52N2csqgEJ9cCBDvVay4/ktySPi1x21kgnKja/J+Hk1HzXAzXaL130V2PB/pb9RH2r8DlQsA3xNC/dVWtKtASh1nMecnVfzpK6jZnqpqDVIPf7443bvvff2/99///1299132+bNm+3EE0+0n//5n7cvf/nL9pnPfMaWlpZs3759Zma2efNmm5mZsb1799odd9xhZ5xxhq1fv9727t1rl112mf3iL/6iPf3pT2/LTp+UE84Uqi1lERlOChtwyTFNTU31jVqtsyPvCHJqvHiuTaSCFEWV0XWGqA/lxNGgoqWKNtkAK3sWDUZjVY4mcrDMI49ROQluV7Vfq1fR+FU7XAZlpOqjoyplHTyGkrwUMfBFvCpdVk4Or2spPtrag5qvWh+CY6rpw3+77nr2iEGLX9N2kIr0Rvk89h+1Y1D6XKvjanxcvpZag9Sdd95pZ5xxRv+/Xyu66KKL7D3veY/97//9v83M7MUvfvFAvc997nP2yle+0mZnZ+0Tn/iEvec977G5uTk7+eST7bLLLhu45tSGWHDKeFyB21ys4/a9LhsCCz8CGi/vF25nZmZsenraZmZm+vXx4ZO4Nq8AEJ2jH1PyyCL0yNGo8eMYsG1sC4FHvSQNSTkNZXhZVFpy7FHExg4o25abEQOUGmcpWMF2mEfsZ3JyMpwL7BufTJAFENyPAiiXC9pNrzf8mB5colN9ZKQyAbZTnGN1rw7quDrOoOe/3bZYF0Zx6qOMPaOmafoPpHU++I3FZsO3QGB9HMMo42B+ItBSup3tRG1LrUHqla98ZdppiaGXvOQldvvtt7ftNqQo2lPHRhUaK3EUJSln5BOJOwH5xWVqucLHpkAKo0vMnHBsWYSObWYyiyhySLw1GY2nxnDZ2EYBKeSnBFJ8D5Cfi0C5hmfuA39ngKB4ZRnyq9tR7qhfvLSq5j4bF7bPgRLyovpS/Cm7i8as5BfxroJAp9LSa6QTUd/Yf0lmNeMrUQ1oRnMcBUUlG4/0QgXL3n8bMOT6tT5nrJ/dl5Fywpw5KGFH7bCyqGsskXLgxXXfqj4zM9PPpnxLqeKLDZKVwp1GKUuMtqViOyWlUc7IZeEGhZmgWurzviPAia7teBn+HWVoJQfB2TXrQgRUkaPInCL+xrYzIGF5TE9Py2UxDoD4CQU1IKt+Y1+8i89fvodvho0+Sg7cX+Zk0e6yZWJuR409mgM8r7K4mv+RrEcBKORF3ayPbUf9ovwymWF5DkbYX6qgBc/jN5ICuBKgIR3xIIVOtITgkaPGY4uLi/3NDr4Eo9pExcHsiQ18cnJyaOvp0tJS3/inpqaGFE45gshAFKg4v7j5gp11idyJReOPQCqLkNHZIvgrmSpHX3POecB1/yxSxW8VfCBARdellKOIgE05QV5S4/Hxo3RqrodwH/wfj2HG6Trp7zZSdRA4UXe5bOZc+aGp6Dy9HH5nIB85VTWvPMfeRuR8+TsDpdogkMeXAXx2LVQFSVnfo1AbsHGq2W2IdESCVC1qs2JnQsNz0Q2okUPlTIrfL+V1cWuvuvEVATcCIOWg1TjZGPg/llcOgH+rqIsdcclIaupEzkg5es48kD+WKY8hirS5XgRQKoNC2SjZqvJejtuP5BXNCZOKmFUbLhtfakQgxCBN8aoyfNS/SHfUfYVcNws0lU7w3EXApQCRf6v+sEwEpBllfJfqKMqC8ohfpfPqf3QsIp7ftqB2RIJURBlQ1QpOGVfkdPz/7Oxs/42as7OzfaDxZb65uTlbWFiw+fn5/lMpPFtjkGKjZ6NiB4B8RE4hyyTQEaoxc1l3TArMIufjZXBTiTICJW+We+kYytH/8zUw5fjNBjfF+Dzwe3744nU2XibMpPHhx8i7qovt+vKQZ4klUnMcBQm8QQTLMPhiAIbLhdw3Zk5N88PNQ/xqEuSPVy+iDI31HfvwY3gPEupFtvM0Ohb
NSQSEXJd5Luk/Ay6PU23gigLQzF9k/GB5padR2bY+d6xBigWrBo1RU00U5nXU8SxyUVGXGxa//rnX6/UNcHFx0RYWFobeF9QmmmM+IiVjp4LXkpQyRm2hjNRux4hndARsrOjY+BmOmcOIwCgqw8YaOVdcckRni/ermNlAxqvkEzmISDbqo8pHuoo6XtIXP4+8ezDBwVEkUzXfDHxsd8w76oV6ZQ5vGPHfpb5ZLrhbkeuzvNtkVCwbBqhapx19Z+2wLpfGovrOQErxw22wHBWxHtfop9NYg5RZDk44STUAlEWtGUDxeV4G8q3m69at6285d0Can5/vZ1C+BZ03IGCEVALJDGz4OoE7JCd0Tuy40SlFfWK2pQwAHREDMS+J4ly1BalMHgjQ0Q3W+I0ZAb42I7rArpxeBMjIO867uh6TOboaEFTk/XsmiPOmnAjz7scUiJkd2OigwIV/+yYB/uBN5FGfUWDEc+HzzVkvni9tzIhkq0ARv2vby0AWf2c+Lwt0or6j/zVAgmVqsqMSoDGNPUiZlVNMswMTWKNokdEh4cVkXC7C7bm4fIV3kDdN01/em5+f7y9xqJ1xvrECjT1SPqXoUfSTjUuNPQJtHD9uz2dw8vNq+YH5LQUEzIc6bqavSXF24M4Jszh/UDCDCc6tepdZLbHckK/o/jq1GaI0//5bBWtY3vvFOrxZojQfvHyayYZ5UMvAyikr/r0/blfxh8GJ94s6EPGcOfQ2VAN+WDbqJ9IHrMt9qUymtr9aqsmikIfa/sYapJQyRRGGQu8oqmsbJWGbCC4OTh6VezbkUaNH5R41Y38q2lNjVs4+IwZi7wP/c/ksqsY6uAsqAgZ2CFEEWeMcIwB2KoGU//eyGFj4k0DYIXBWGb14U33UBofIQUVOskTIM+t85ojUdSOlhxFFoFITUHmd0rW7LCPIon+shwEfrhxkN10rnY7mrS1lwXAkB3W9VNXJQAPtuhagIj+a9TeKTJjGGqRWQmpyIpBj8jJ4Udfr+/by2dlZW7duXd/ZuXH40t6TTz7Zf08W3wvCT2tAZUJHHxm0ymLU+L0/NEj+H9XNHBjXV8tYil8cMy6ZYln+rcaIY2NDRmfES5+eQfn8cT9+0R6XLD0L5q3SmXNWYFUjc/6t+uDregqgUWYoD3w4sp/3AItvC+D6mHl5O9FradSYGCjUOLEPbkfpRATS+Mghbp/Byv+3DRRWwzmrdl1/eCkal5/VPDPhGNuMTQF21s9q0FiDlFJoVQYNKwIlVqzaSAnBDq+r4DeCGq6349PRlfJl0VIWmdZmUwx8ZtZfXoycJoMG8+iywDZUlIr1ozGrpcMSKb6iDA+plOUoniPeFFCxbtWOKSoT9Yl6yOUUiJoNZlEKxNA2VLbB7fL1PqWrHPFH7SneS/JRDhr1LtJJVY+DWQWQXKcNIU9RW6yTyjdkelICTeX3kK+241FjWAmIHREgVUpZ1TlVh5U6cyIcnXn0OjMzM/Dt1y98Y8Tc3NzAjj5e5sPHzagdYzVRkrcVlWFQYQfuIKMyNc6gmE/lDNQ1B8x28FoQZ1LYLztL/+ZMLgIbdjZ83UXJPJKd9zkxMSEzhsz5KUdTExBw++q42p3I8lLBgnKSGGSoBwwrJ49yxW/Wi1JbzIe3w4Glf5ecIGZN2TIflnNSgRnyrcaSUQSCCpz8m3UGbYT9lqJM7lHffI7tL/O3ES81vDIdESDlxI4pQnN0srhcoSK8UoTiGdPMzEx/ic+dbdM0tri42N8gMTc3Z08++WQfpNy5oWPBDExdWEfDUjxFclK8swL6B5fnmmZwV2EEVMwjvlZAvaTNZYdvNMbnGXK2gsQ767BMTZYS6YZfK/R5z/TIy+OGl8jRsn6x3jJPPvZSEBLpgFoOQl7MbEjv0eHhXHHQEG3g4HF6HxHvXo/niXU8CkK8LH4rUgECnlPBWgQiNfal5jajWoBCm4se2qz4UbbJZXnTC/OF84nllExL/IxCRwxIlbIoNgh2Zqz4qj0+5oqDD4ydmZkZUgzMnDCDiiJ59QpybE8pXBZ1M88KzNUSAjsdrsfXfJi3bNcWjpWfqh0tdSpHEs1pFlyw7FBH1JzwHJjpp90rgGK+mZdobtR5xYdqk4GK6/l4EUQUsGAAFekHjzHST+wjAycVjPEcl5wez606H81JBFBZOfwf9V3qiynSDZ6LrD207ShgwHo1suVymYyw7ZIsMhprkMIt2xll0YpnPLjsFhE7D8ykZmdn7aijjrJjjjnGmuaHW8wdkDyD+sEPfmA/+MEPbHFxcSBLQGftGYVSROUUFX88djzPAMPkgDI5OWnz8/NmNrhBhI3Ef6My4r1E6ukJLDu/j4wfjsrjYOeF867AV8mgBCjqOiHOtwIpNSeR02U9VBmbOqYoCqRctirYUQGEH8PsFwFCLeuq9lg2UbDBjo3nhzP5SF+zYCCyjWhj0EqDvwi0aigCF//NdltzvRqDQZ+LKIDCerxRKxt3SSbZWKMgQdFYgxROSI0SqejEz+H/yPj9G3ff+eOOHFjMDjh6lTnhJEXKiEqYGQ8qVJQ94LWTaGMGl0cH5uNRETRH3bysl12Hwu3emdFFVFtOAQWOLQIM7KMGMDMHqXiv4b/UTuRofC6apkmfho46xDaA7Wb8so5iPdSbDNyyXaqo3xjYcWbO9hG1V+vcWRbqf0QRH23qRvMQ1VH9oPw4KOBzqv3IP0YBQO3YlKwzGnuQ4rX+UvmI2BD4nH9j1uNbzNetW9e/tuIRoN+si8/kU85etY3Xo1TEW3Iaivi6V7ZDCLMfVkz/VvcHqetQ/KprBHe1xFeaJzUfWfaEQIJyxO3W2KZvEEC9yhxa5KD5eC0IR+2pbEjNC+qfBxzRXHPQgctCmT1lY3E5+4aSEiiwHqkAgW2T51bJgWXWJgvBOlEArBx4W0CK6rQJeBgQFYD43PpYMGjMZI7HsI9o3lV5NR4VIGY01iCFu3Qio60hjrCiqMQnwLOnY445pr/U5xEr7+RzkFJLXk54QZSXu1Sk6XWcLx6vcox8o2q0e86VGO91QT4QvNABIggwsDrxCx+jXYw+hlI0WVqbx2jdv31jBIMnf9ghKrDGttU5xZPqj+tg214enYoCQhU88DXO7IWIPp/+wU1FtRE08oxzmIEZBzIoH1yq4g06Xof7wza4/0jXuC4HF7W+RGUnWbms/RL4qPOsC4ovswNLgCoQi+ardD4aH1J2XSyjsQapiKJoSp3HclE0jucnJg68otsfGOu70cwOZCH8kEze2cZ9q6U45yGKyCMnp3hWkaQCKezbswp8WrQTOjI8hs5D8ZMtO+JYWNFLc8rERouZqFoqQrBVRqb+K8fQJmCKgE+NJQIors+gqZwTZxh8ProeGvGoHCYGkFE7akMN6yKDFDs6pSMKoBQol8YW2Sv3y32q76gdxQOWjQISbLcEoplPY4BSdUrEc6zGhf9r+XYaa5BSu8ucIiVUhNmGl0ND6/V6Azfo+is3PIPC6zjRLj7v2zdqIH8IengDMDtUN1p+ZA+WwyU6HpvaPcjgxHz1er2BpTE0Dl
4aYj7Y8Xgm5eBee02qVqkjg2be2lLkmKMPn3diObOO+geDG6+neFFZHL7IEbMPzlqzzSncp5NaNlXX5NQGESUTpSvqPWvMq5I1k9rco+TO/Kt5a0sKqFQZJgWQaNMs1xogQQCK6rUZay3gKlsYlcYapCLnGkUO/l9FJOz0vZ4bjHrtOxoPPiaHb9LljRDIowMgL3+pJ2AjkDK/zgP3EUXN+J8VjsHLQTPb0KFAlecpy6B4Fx3OXQmksuOKF5aFcmDIg3IeGR8lo+S+Iueo2lCBAfbL4KDsIuo/2wTi8+88qF2NJV5qggfUB+ZftYGZFc8j6lVNdlgCKHUcZdgm+8jax354ydcDZ+5f8RTxx+Oo5VsBLx+L5nglQDXWIBXtUlNgoJAdhYYTjwCGIDU9Pd1/3Qbv5vNrUfPz80PbZ9HJszEySGFm5rwgjwiokZNBOeDvKJqMFNjH56+wxyUcL5sto3F/vKOPPzxfaq5w/G0Un/WBn6CQyYblkrVfMkruh514BBRqrlW/3q4HSbwRiJ23t6WuJXLbOP+o4xFI4bhqnBeCKAaAyCPzw5uRGJDQhnle1ZyNet0ksqcaAGAg8jbwG+d0aWlpKNtUvGRA0paiNhTP/lv5hFH6PmJAiimKGpzQqLG9Xm/wwZPuVB2cjjrqqCEDws0SDlLeLzoDnFiM8Bz0PDubmJiwxcVFMxt8SCcqpXJ0/q2iSyWbSB4MMrxM6GP2Or4ZAfnA8eGFewaqaIt0pOQ4ThUxqnGpsijDqI0awyoFP0jKaapxIlggv0ouij+XP/eNS6uoS9yvckDctztNvgaJ40c7UsuDOD4MZFBXer1e3xZ4uZE3dqgMSgUjPJ5szkahGmDiMiiLaPeb0k8cn49b2b/aJLES0EL+oqBO/R6FxhqkVOTrxAJUTk4Jmf+jg8ZXOHj9bEcbGg9PkhsgGyRnSIonLONKqTIBFc1Fjlgdq1GsKIKOAC67/lSKvldKbJwluZQiwDb81TiumjHX8IVyj65BIpCoYCADfQSp7JoOAyB/uzPl60c4NgbyqD9lHwq8cBxK5qM47Zq5zerwvKjgAOXmNs+yUu1i+9w2g1YbymRV0s+2NNYghSkvEgueBVZyKtgmPk/OgcSVhe8L8sgma5sjRl9KxIyQI0yOFpFHjJRwG7wCSgY/jqq8nP/GZ/ehU0G5siy9H18OxU0SPk51jY6dZMl5KgNRAF1LJbDm83xMgQfLJcr6uXwNSCmee73e0LMQ/ToqB1ie/aIeqwDL7/3Ducd3oqnxY5atwAnPmQ1nEX5N1nXF31jN71/D5b4s8FABHPLMel0DVKM49qiuAmf8dmJ5cVs4Z1wXl2lLgWrt2BCo0I+UxtGWjliQQgWMsi0vy4TKra6d+I25+OgfjAojQkPhZY3MgSkjxLYcoDIHGVG0Bo+RaxQxZ2P0sSHA44NjFWAySKklJAa3TEY4dlWn9B/54jLqWPS/BEwluSonEM0xPvAYd43iBpjSNSheCnTi5Tvum3nLwEqBA47VwQozNgYntRTMVApaVICF8i61uxJSgTQfRxlhUOHEfoPHmwU00ThVQBsRAhX3lwVUbeiIAylUcrO6JUGlIBFA4eQ5SKHxRruSmJfosUDMj6rLH4+SIueSjVmVZSfDkXbUNo6dQQoz0ciA2MlF16OwvsoUaxxNdoG8FpQyoHI+SqTGHPFSItcrByn1VA9+Egi/rNFMg5SaIxUARoCkxomZm5O36SCFbSqAiuRUkn2m/zVtjAJSkY5gcBUFYVgf5Z7pHQOI6pPtpGYlgv1lFqRl+lur22MNUgp8ssgZU2qV4nsdBD43iIWFhf6EOjh5vWgHVcYzv/HUSUXK0bIFO2ZcjswcHiqoinjQKbgjq7k5GS/KK3BCPjLeSg4Ej0XBSBbdYb2IakFJzV0NcVtq3CUeXK6Ylc/OzvYf2MtLyJ79+y5U70PthnPiDRoKfFAOGUipsfN4OZtBPcWMPsrClJycokCV+YscdfS/1ilzXe5H2TW2i9ehWO8VPzWyKPlPbDOyRZURZnbcFuCPOJDCcywoPOa/szVavEjp9yBx9qQmO1IA5RCyzCJrVzlldYzHFzlDHDNGulEEq5yUOziVeZYMqPSNc6rmCuWrxsxjL4EU95H1m9Ubpd0ap+ey9oAHnyjPG3HwWga+FVrNHesQrhJkwMP8q3IZiKtxs81m/ar2skwhakeVjajG2ZZ4RZ1WAKV8CH6irCoDqIz/KNBr027kY7DNNkA11iAVXZNC6vUObCTAsiw4zALwYiwqghs3G7S3gX06f94XfqMzVxmGG7iXYyXG3xxt4hiwTb/XqeRcEJD41Q2YXeEHnRw6THSUmcOKFD9TZNVO5IywXK2TU31F57Jgqba9yEmrOt6fg5FnT/7xeUD99U0H/llYWDCzwSc84MYK//bNEaibagwo2+i5etH8qrlhpzbK/JXKKx2OAkzFd3SurY6VAs0M5NkH4Njw28tGlyNKY0ffoECHyyK/be2NaaxBCu/bQWIH5b9dGbNrHRy9eXl/B5Q7B7PB12B4vzjBfC+MipgcUDJD5XGpiBcVMLpWkAGF84AOhq9b8DIO8sRZlMpkOCrn30yZsXJ7PMdIapyqzyiCzYws27jDdbO55TIRj7ykio+Z4qeg+Fz57jh8Gsri4uJQcMbLxQhyyjkp+US/vY6aHz+HoIaBIsuF6yldVzqg5MxtqfmszahUH9E5lTGxT+C+lSxYnyJ9Qz6Unkff/hvfzs1jjI7V2FkN5WmIoNtuu83OO+8827Ztm/V6PbvuuusGzr/xjW8cigjOPffcgTLf+9737MILL7QNGzbYpk2b7M1vfrM9/vjjbVmRGxv4w0+AjrKXyDlgBoFbYKNrWspZqyUwFX2zQ+D2uZ8ss4oUIXI8eAzPqeU/js6yMap2o1d6RH1nABs5VPXhjJCvszEPzIs6XuMwImONxpABN29IQYDCl2VioIE3m+NyH1/XiDYiZYGJcrZMKkqPdDiTDx7P+ova4PqqfWVHK/koOajjbfpW4+PxsN5mgZbiQ/HD8i7pb9RnyUcxtc6knnjiCTvttNPsTW96k51//vmyzLnnnmtXX311///s7OzA+QsvvNAefPBBu+GGG2xhYcF++Zd/2d7ylrfYtdde24oXN1gmFuTk5GT/vgtc9uBXBKgIDCMQNHw/5+dVGq2iFnYI2D4vqfnxEmUK7Zmmt+8OjMfFu74yUEL5c/88FjwXOXNuG3lEWTNlbalyPC7VNy/jRvLO+lLfzEeNMbsMUF/8nifMnHyZzufRv/2zsLBgS0tLA5slnPA2CF8ixGBM2QkGX0q+qGtKbs4rHvc2o+XEqB3/xjawPvblPGGAUbo2WetIs7I+jwzyXBft0u9P8/qZrbA8sL8aEMff7i8iGWdtMI/8nwP0Gt9mNgJI7dq1y3bt2pWWmZ2dta1bt8pz3/jGN+z666+3L33pS/bSl77UzMw++MEP2qte9Sr7wz/8Q9u2bVs1LxHCs8NEobiSYkqdtY/kSu118XdpAlV0onhVjjxSah5XJh/mB39zv9kGC
R6LGlMGiPxd6wRU2RqQwvOcCTFF4+EyPCYen+Iv+q/4VQ7JnbADFD4/UgUHCDIOUn59KQpmuJ3o1gMETScMZngJvjTH3l5kR1lb6r8KFGsCBPYhNVQKWJSeYL2oTwwws75VYOf9ZVSyF9UmH/fyNYGi0rlaOijXpG655RbbsmWLPf3pT7ef+Zmfsd///d+3Y4891szM9u7da5s2beoDlJnZWWedZRMTE3bHHXfYa1/72qH25ubmbG5urv9///79Zqaf3YdG5+TRIV7/MRu+PpE5eqyDD49lw0VSionRBPOOmRTzgLyo46xcJRBEHpUDz67dqLEwb+hwIkVmHjkCVH3XAhXX9fGoe9ucapZklW5lwK949Q87UwZU7BM3SczMzAy8aBPnDAHJAcqP4VMc+HUv3j5mYr48yKCO5Z1fj/xxGdFlHjkknGPckMTyjBxvTVDBwZeT0u82jlONhfvNnLyqg9vL+XYZP47+puQPoqw0Co56vd7QPGS8R/5ElVUBUSlJcFp1kDr33HPt/PPPt5NPPtnuu+8++53f+R3btWuX7d271yYnJ23fvn22ZcuWQSampmzz5s22b98+2eaePXvsiiuuGDoebXNWjsSPLy0t2fT0dP+cPx5GkVIAVnrOCnCJS91/gn2hMvKEczkeGzoABhl0ghz1ZOARRVGqLRUZsTOpMQ6enxJAsdOKDK62Tz7GToIB38uVImj+HUXwyvGi7jqg8NZyBxIzG8h4cJOEurcNQQqvY5kNvoKDHSLu1MTru6wjfFw5ZLQVHKfzoOQYgVB2nPt14lUCPM51VgO4VLCqdArPqcyUAxfOZlmnavqMAgKURZTRsU/AtlXfEaiWaNVB6vWvf33/9ymnnGKnnnqqPec5z7FbbrnFzjzzzJHa3L17t11++eX9//v377ft27cPTBJPAjtNPMYIjoCjDCOaXHT4vLyFhup8quUMdgiZA8PIiidcLdEpUu3jMR6vMrBMNgyKtcRAhcTzWAKmqP0MIDAyNLOhuWKAHrVvLK+cCu6yc0DgJ+RjYGJmA4/nwkd2IUhheQcp/6B81ZIoB1xqc4yX4zIZKd1hMFPl+VjkLLO2siygxLfiJdMBLhuBtwrCcFzIF8o5Gg8HmIoX5J1lqAKsrO1o3FmZGjroW9Cf/exn23HHHWf33nuvnXnmmbZ161Z7+OGHB8osLi7a9773vfA6lr8Flylb7osmpWl+eHFwcXGxv4UXLy57tBmhv7eR7XbC5RD/7e16Obz4rKJXlQrzRgTkRzlh5ln9VpQpdARSWX+1FIFADUXgVXseo3s0fm6jxFvmDBEE0LGqjMXswLLa9PR0f3kPl9IcfBCQeMMDgpS3OTU1ZevWrRsAPzOTz8ZzPeV3gKFMUW4oO1wOZPmgM8ZvXpJH2av5ULKPAgrldJWdqUAz4yOqz9/YRvbf7ED2lMkB62XBhfu8SH9V0MT8szxxTJhplQKJUWz7oIPUP/3TP9l3v/tdO/HEE83MbOfOnfbII4/YXXfdZaeffrqZmd188822vLxsO3bsaNW2GngGUi5wf9Cm/zY7sGRSihY50kFSEbqKglBx0HGVgEb15b85Uq8BK3WsZJCZfLNsiNvIjisHVkM8T0htQVMZXRueauYQ20U9Y5BSO0FRb7Lt9CoCxm3sCIqlTRLRg5CjcZYu/LO+KIeuZJX993azNqJMNmpHAQWfi7IQ5LEU9JVIleWl1ch++bzKiiJScmK7bzPGtkDVGqQef/xxu/fee/v/77//frv77rtt8+bNtnnzZrviiivsggsusK1bt9p9991nv/3bv23/5t/8GzvnnHPMzOwFL3iBnXvuuXbxxRfbhz/8YVtYWLBLL73UXv/617fa2WdWBqkIqPAV7GaDLxbEaFat+SoF52No6AhI6qK8R6GYnXEkGSlh5ogjw4rkyIYdRbpZNIQGkK1j11KNQ1I8R5G7itBVe6pflsFqEV/XQT1UOsjAlC3tqQ0PvsSHuwO9PLbpPCCg8dKgAmLUVXUtxcsgsXyxjTbBThbNqwCuFBBiJqn6KIFhRrV1Iwef+TyWe21QmPmTzBZrlnVHBSizEUDqzjvvtDPOOKP/368VXXTRRfahD33IvvKVr9hHP/pRe+SRR2zbtm129tln2+/93u8NLNd97GMfs0svvdTOPPNMm5iYsAsuuMA+8IEPtGZerXuXhMARIu6Q6vV6/R1Q3r5S0EzR+VoGbnCIdo55ffXIIedVKTWDY2nsmfHyOd7QgdF9qa82WUQtv6XfmF2gw8fj/F+NAw1d/Y4AuxQgcB1vl0nJrWma/mOMosdWRdvFcb7w+hYv83E7qK+cdbEMERRVZoEBi8qYOCCK5lrJRbWhbIfrlbKobD6QN3bwmR7wOLPMi22I5abGy22gDSs9R7+k2s3qRPKoOa5sJqPWIPXKV74ybfz//J//U2xj8+bNrW/cVRRFtTXOGkHK7MD6udqIgaQcnR/H3+wEHQh5bRiVlR1MFtkgLzzu6LsUvUXnI8eM57iNWgWsjZQjMPHfKO9IbiXHxAASgYoCrKjtyPlm51QwxNct8ZpU9ioV5BVfD4NZFLfDgRx/1A3WkaPkeVDzw8ci22P5Yj3+rcpnvEZ9lfjN9C3qX7Wj6qtzNYBYAhsemwq22gBRW9DycyU/7TTWz+5rI1wmjO5w335pSQijcvzPyoNG4PcecPRcO1GZEaBzzhyh/46cKLatnG8EfPhbgVxtpJoBh+KTx6uWltjJZ04UHTl+Mxhlhl9DXA83JrAO4RIe3q/E4+ElPh6nB2MzMzP9jz+I1m/w9c1DaqlPZVIsZ3XRvk3EHOlcyRYjcIrK1wIU91Hbb62+M5DU8BNlRUyZzSt7zgKLjLc2fJT6yOiIACmeuDaOkqOJGgH6DiRexsguFGO2xPxnvGVUa2g1bbLBRNFYBlJRf6PwyW3XKjbOI9bFueFrDRhA4K5LBUorBamM0PkzSHEmxUAVZTIKaHgTBi4fsi2U7IL7RrtQ9/Cousivf+OnNjNR7eI4OSCKMhWe48xJZ5lQicfasbUN/LiOsmtVtq2dqvbaZFVPiUzKqY3DiLKdzDC4L9xOzllNKVJjpcwyHxWpqd8q0ld9lMalgErxlmU3WdtMCgxLY+b6qi8EGXeWfG2Q5z3b2JLx0IaUQ/c+eBkPvz2LUoCkHA6O07ewT01N9V/pgVvDeXegmUmwVmNRPHO0H80vZ2NReTwWyZTHr85HIM7/lY5zH1Ebo/ghJZdoLDWk5KfORbbDPJTKlzK7Ut0aGnuQigwIv9X5yNhrBM8ZlOpbXa/KoqZaIIqOK6DCfkvjqikT9VNDDL7syJCHrJ7im3/zPSYOVOxYGdB6vcH7lDKgrI3uFQ8KrBxI8eHH/jQUtYSniGXlYOMg5ct8JZBSGQ2PCXnHLEzdy8f3PjFw8LU+zsQwuyw5y9JcMDjwHGc6nQVaNXVK9oJyrQmOsiBNBd1sE1E7WL7GJ3A7aM+lIKKGxh6k2grQf0fRaG1bHJHzMc60lFJkBhVFQuqbj9UYbQTutUA1aoZRO19Rv95GqQyOXRko
jgGvP3Hm0NZJ4DnVv/qgU2DQ8GM8xprIFrMp30KO72DDJUTces72EDlvBbbZtT8GUeyP+2mjXzU6VQL5bJyqr5qgtq19ty1Xk43UAK8KQpStlcq1HVNt+bEGKXY6beooqo3I+NqTOwPsI7qTPeqXJ07tMlRLLxFg9HrDr47PlIOVrYbXUUiBCJ9ziqK9iL/sfg10pBzxo1x5i30J5EtOIuKDAYx5VO8sy/jw+hgc+XZz3yyxbt26/pi9D3wRotKXLIsyy5e4UUa8sSUCMs6CvbwDYCl4y7b1Z+BeIhXklMpHVLLDWsD08pzBcN0aHVVjwoCXyyr+2wBmGz8y9iA1SjqpsigGqKidyLmqSDCKurie+q1ASilvpPCq77YRDLY1ShujglkpWFCRXikCzzKpSKbRGNoAUwS0yFMUuSpS42N9dbnxPU64zMebMiLQyMalgBbrZ05YjRXLc9CXBSCZfdT6h2jOWQ9rA+JSoJv1j/8jGaqgLdNV/vbfSj8jXrwM91+beWXzlNHYgxRGV5lTi+rzeRU91AAWOz6+WI/9RMDk9RTgKSesDIv5jraoR4acKaD6rjVE5UQzJc0cQ9RvFCx4GzVZYnSsbcaEFOmk+qjyJaehNoKYmXyDL29rZ7DitjJZqM0cTKwrvLMQy6nNSNE4M9mouVdlFa+lIKCkd1inhkoBUVQmC1Kw/yhIinxfTT/4XRPY81hqHgiANNYghbvszLTjbQNQtZQZGR7jJQ7lANixqg/3WxtloeKUblLmulgfAVg5hZJzUvxGRo/HHWRVH5FhZOPkPlcCPG3JDRqXrfAYjhF1KZIPjl8tXfqGCXy1vL951zdnqFfJe9tKB1XmlIEs1m+b0bBdox3Vgk7UX5T1RXqqgj3+HfW5Uh1rA1BZgI4yq9V/1KkoEGHQ4n6RRrlH1GzMQcps0JGWIoWac05tDSCLJqJoCQ2yDUi15Q/LqyizNgLKwLGmvzb1ov9t+8XjtWM9GBQ5deX4WWYq2o3mz50KPl0Ct5Q7qW3jClyi4CTKoLIsFYMdPIZ1+byvSJgNPueQ282orb5mAKrsIALzNlQCu6gcBzeZv2vLWySLldhQWz9idgSAlNmB5+XVXGheDYoMVEXAvkyBdbG8WQxSWKaGJ/+Ooq8oUmzjWKJy2e/MAUQBg5fDJ9ZHY874UWUVYK0k4q2NmF0/8b4nBxFuD+/z4nPOr59HvffHHfl2c3zFvAMTvrEXX1HDOoK7Hp1qQFaBnn9jdM43WPMY+ZlySldUJtFGLyL+IjCIVm7GhWpAzYl1wutwW6pNBdi1gSrTEQFSZvrirVm8xKQUuuTcojJ8TkWIqk02juh6VNROdoyj8FGAT42ttkwGUG0iTgbJaL6ytmoAKhofz5Xqv9Q3O293yPi4LBXsRLszlWNw4iwqesIEX4tCYl1U4yllUqVVCgQpBTJoB9kWfGVrJQfJ5xVIuSzVuFnmpbHWUMl31fRVC86R7nBbkf+IqAR6tcEx0xEFUvhtNpwJsDKr6K22jwi8Ro2u0HAzgFIGmTmtNpGMctZRFBWNgf9HYFUTeeH5rC8eP8sockaliDJzeIrXGmqaA8/aM7OBa0jcvu/GU21g31iX74viTMo/vvVcPb1cBU04XgTbKKvCb5Q7P81CBXCsM2p1ohacUKY8lpK+RgGC4pfnZCU6wpQFUSVSQQYeL4Ge8iuKFz9WuqdvlHGMNUiVBhxNwKiKk2UK2G4bwPPybEQr5atNBlhqUx3PwCwqV8NnLbU13AgssS3laCI5cv/ZU0iQZydcomagVv1hG1H5Xq83sJuPn3Tu90X5g2T5YcoOINlbeBUQRRlVNibMpFTwl9lXJNcIKCJdLOmesudSVsK61CZjWE1ajX4jwK6ps1JgQhprkDIbVIzompSKfmonMHNUqmx2PuIpKl/icRQnzY6kbbsZ+NQ42UiepSwwynQiipxVxCs71FFAvkanVDaPW69L4J7xhs4fl/kw84le68GBksroo4ypJAc1LnZ8NW2jnkRgqeqo7whAOSNj3SzpTantGlI81ugjy7Pku7JAL7PpmmC8xl5q/ddYg1R0z0VGqxXVsIDV3v+Mt5KhR+VreVLRtnL6beTB2Z5/Rw8hrelnlIivZCSl5Z4oi1KvXIlIybVWF9mx4v10bWXBTh9fbOjHfJv5/Pz80GYJbIevYWEmNYrdYDbBQLq8vGyTk5NDL/rk20pK7WM/URbWJhPAdv13TX1lbzyfyO9qOvFIF2uC3AzYDya1mZOxBikeaDYxq52VRJFMFgFnFDnGzGmWop1agCpFaaWxljLIGqNkB18zjzXAF/FWCwhKNlE0ytmbyiiQ1ENXa2QZ8YRLdOjo+eZddd1AteHHoiBLgUNprlXGhgAd6bL6KJlyP/wbz2f1uf9SxlEzBqWX/LsNZW3UBFgRrysFqZrs7ykFUngfReSU1X8u7//VZCtlUM5EOW7/zc5F8aoMrwZIMkPlMWTLJJnSl4xUjbsGUJjPrEzbrK/mGBPPU0m2eMM2yiDKrrA99TiiLLhgPhW4+G/fuo+v++CnnTt/2AYDlZKbGgs7aAaTqD8sW3oSgQKoUtaM/1dCJd1Uc6/KRe+Ti4At6jvzSVF/Ec8rocx2S8FtbcY89iCF0ZhaMqlxbDWTFSn8SiZaLVdEhlfiKzvvypr1V4rga8aCsi85kmwsNaBbaicKUmrbUDe5qvuZnHwnXu29eiivKFBQ/PNmB++bt5w7cOJmibm5uf71KO8LP7zch5mUkmeJ70zG6g0C7MBL9pEBIANhJNNS9hMd40Ci5BPYvtrYeMS76ivjK6O286f6Vf6Xy6D/rJXBWIOUWbuUt5R+Zu2XorJS39E5/l1y6jVRtuKn1B+Pr9SPkmWUAUZyz2SWZSNts0gsWzIQ5RDZKUZ6k0Wn7HQjY46chQJ/dsZ8TxS/isPfT8V9qWwqykayYKo2c1FgYjZ48y7OU00Qp8aS8cEAVePMcf5LuoB9rAaV9LwmyCzx0xbQ2gAb61Kb/sYapFi5fImDo1kVedUaFJZRZWsVMXL0tUbI/KgoMBofU/SW10jpSuOLgoFM7jXjG5UyIIoi0CxqR32Knk6Px2r1gZ1+ycEo4O/1euk9Ub5hYn5+3ubm5oauNzkwMciVNsOwrqrnsmWAjX1kGVKtbfBcRDcj8ziiMllAU2vzbJ94TLUb1cd++Xz0XwVyNYED1qmxw9qAMQp6amisQcqsTon5eI3QSuWjrAFptaIobm8lEVopq8nGh+dr+Gx7Lipbk/WothUAZIangIqfZJ+NR0XxkT6WHIACJO4XwcV39Xm7+ABZXOJT8qjNPtQ4+SHKyFd2czr2w22wnEpUcn41c+f/2wRH0bicMoCJ+q/pz39nc1WbeUb9ciDMZdvIOer7KQFSWSZSykhYsbO165IwayKjts69BCarQVEGwNGeijhrDT86VjLg7Djylc2zArEIIEo6VKNL6tXpbaPxjC/sD/tFoPKyvryHzwnMeGfgYV1U+ojXlTDDLD0wmZ8Jpx4BFcmjxD/zwjLL2vHvtkClAr0oOFF9Rsc
4mMh45vbb+MEo21PnmBfsOyof8V9LYw9S/o1RbykLQIGp5+W5oEs3BysnphxiiZSSoaKXIjau0yYic1IgXcpUSsDJx2scfkQRcPB/Blgsh8GIcsIqg8qInbqK6KM55b6Rf3WO+zWzoccfTU9P2/z8vC0vL9v8/PxQNqV4x99ZZI784biZTzzHWVQUFGXyykiBa1aP7Vudw/81gRTXL91X5rIq8YrnIv8QUW2wXuMv2s4JP0VEjaMtjTVImZUvrDJo+G/lVJw4ksCJzBxtKfIvURRF4bkaoCr1kRkpR4ORkirn05YiJ1CT5eDxUptqHKW2S5TNR6ZTGa+RY1SBgnLQqKO43TwKNCKgqA2sVFnmS9laFLkzqcwA66j+eIwR7zXHFK9RwIbHSvpbakfxVAOsqu+M1NjYP9T2m5XH80qWJRprkPIlAr5rPaLIsGuMskapVko1hpvVrQVOXKbJwMbL8cX0Ng4h40Odi7Kg6Hyb9nq9A0tNuDSmgK/kfBVlDrN2LjMHh33wlnMnvyfKHyDrtuF1GND8P2c+GanxYT1uJ5OXGmebTKCtHZfG5d+lAEKNwctmFAUqbYC1lP1k9TK+ogC2DUXB7Upo7EHK7ICC802K+JuNMorwsD1sv0aJStEyU43SZ/3x75LB+NhxPJmzQSeBDqHEZ01UzL8V+ESOqkb5IyfjfLPuKHBimZScOEf6kU5EETBfl/F6uPTooMT3M3l59Wp4p9K1IvWp1X0G0VJ5rocy4DnBYxxUZWDYhqdoTFGQEWUdGUV2sRL5qbbUGEq+pi3YRu2wDawGHVEgVXIGbICl6zCZY4ycugK8iB/V3qhRUkTZtQhuh8+xDJRjwLLZsUyWnMnw8eg88l7iITLWaL4VeNeMX5WL5M39K+KAyeurxx/xfVH8MkMOMJjHSDeyTMZ5VPIqRecMsBEwqWCpJOtRMwKun2U+q5UxrDR7cRoFiNV3m/qrBUQZjT1I9XoHnqagHo+EvzGSzB5Oq5xibVQ5Cil+MwNYKR+qv0hmkSK3McwIgGrPZ8S8tnk7cxbclBy5CnB8lxouk/IGDBU1YxbFr8ZA4FF1/dji4qKZ2cAbd71v35ruT8VAZ4+Al8kI/7OssE1Vv6SrNeDEwQbbMvLCci7RoXC0h5rYXjFLRx3i8qvV70qBG2msQcpML9cowyhNSCmixXKrPaErjfja1lG/o0h3VGLgUbIdBaiUo454VkCvDDUy2iybUmXY+WdRfianSG5RwOJLev67tDMxGyfLlPlQgJsFWVFdBUCcWTHPqi81FzVjV3POdZUcIqrxCxGgr8QPqPZL57J5U9TGn/K8qzptxzvWIJVlGZGyldqLjLHUTsnhKlIODMcQtcXK3jZqYUfUxjhGzaBqACrrJwIn5h23wEbzyO3xRpIInLLfUd+c8XDfPtboehRmGF7Pz+ETzb2Mb5hQTpjvT8JvtXEiym7UvCg9xP9LS0tDoOU8cXuRLFHm2TwwrWYANoq9cf1Il1bCV4lqH+aqKOIpChKV/6qdK0WtOb/tttvsvPPOs23btlmv17PrrrtOMs6fP/iDP+iXedaznjV0/sorr2zLipnl1zCiSJzLR21m9bEPNGJ1r5ZqQ13E5sfT8JOt1TFvT/HFTpLLMh+RDHFs6oV53C7LMnJenCXUgjrvNmTZ8KN/1KOA+Liqo8pE+h21nd0rhOPGa0m4fM3yVnOxsLDQf/SRvyuqRp5q3ngOuT8GjGi+lX1wG9mLF6P5ZD75GLeRBRfcduY3WGbRscgOIj1RY6nVMSV71V9mF7hDtOaTyTbyNZnca6h1JvXEE0/YaaedZm9605vs/PPPHzr/4IMPDvz/7Gc/a29+85vtggsuGDj+3ve+1y6++OL+//Xr17dlpU+1kbpSbK5fmwWVMoEaypwXth1FHqVMKjJkdaxmPJwJ4HdWT7VROqbGwcfYCHjuIrlxHS6H57OnF0R84Q5K5VQUv/i7dB6dvJn1b9bF+6JK8lNjUbrn3zXBF9apOR5lZvxfPbWd//uH5ZfNbcnRK76ywDWyKeYrq5sFDDV8qvaVDFT/bfxY1KaSfySzbHxIrUFq165dtmvXrvD81q1bB/5/+tOftjPOOMOe/exnDxxfv379UNmI5ubmbG5urv9///79ZvbDi8R+MdiJkV5F0YpqMi8vp34rqlW82qi3xFeNEUR1eadkiV8GKh7LKGOqpTYKXqobGVtN+0rm6JhYB9HZerloQ0TUH24px2P4fD5sJ9L7zNnjOLxtBMCap3GwPKJzKqBg3cLHJkWgz0BV6h99Auo/UwngI5CpcdZtdE31vZrt1c5nKftRgULUX3WfVaVGpIceesj++q//2t785jcPnbvyyivt2GOPtR//8R+3P/iDP+jvTlK0Z88e27hxY/+zfft2M7Oh5QNlmCVjVcsTZvHacVRHOffSJ1oaUcs73CZT5HBGHb+K3KP/3BYfX02K5iXip9ROrawiGWRRIn6ym2U5U8l0AF+74def+PFHbAvIVy2xTqrfNXVXy1aiJdts3mptJWub5Zb10+acAreI79ox14y9Zq5YVqP0XdtnDR3UjRMf/ehHbf369UPLgr/+679uL3nJS2zz5s32hS98wXbv3m0PPvig/dEf/ZFsZ/fu3Xb55Zf3/+/fv9+2b9/eBzZWsOxajlPkmFVkpyiaUP/OJk45UuVkaxSOnbSqWzMONR7OFBTfvZ7OpA4WZfLEbxUZZw4rK4OUyYTb8TV/MxvY/s3tRW17Ozj/mDW5rPEalL+uQ11Hw/ZUH84DgyJfL8NySj78P3J8kdz82+WBQSjWz24pibI9P+Zzw3OpeMW+s5vhlVxxDKNQW/0chWp4K4Ee+4MaG6mlgwpS/+t//S+78MILbd26dQPHEXBOPfVUm5mZsV/5lV+xPXv22Ozs7FA7s7Oz8jhHG8o4VfSaZQZZH6XIPDpW4/jMBm9O5snG6DjiRxmOfysQrLkZuhQx8/8MdDO+aw0Fv7mfzDnVtqd0JeKf5yZqu3T/VBQcoQ5wgMUg5ccdDGujXaWbnEUhQLV583BJfkpeJUKwYPt2GaE9KZBQMvbfLOeMB/xfCka5L9VO1E8G6KX22oKasoUoyFF9K3kzsCt7yeiggdT//b//1+655x778z//82LZHTt22OLiov3jP/6jPe95z6vuA5Uzix6xrFlsQCuNdmqdgPeVOfW2ExkpF36rjEfx4HUiEIxIjWG1Ikg/psYUjauG9wyoRomCWdYYPGGgofhnvpVDUMFVNodtAcrbwBvl+ZPVX4m8lOxU1hKNjYMyP1YDVCoA5P5LY8jKRW2tpO0sY1mp78D/7N/aAt8oPCEdNJD6yEc+YqeffrqddtppxbJ33323TUxM2JYtW1r1MT09PfA2Uk/fM+TPIrw2BpaBoAIFpCwDifrCesinUsxSZIdbgv1bjamGMpCNZL1a4ItUm0FFzrrGwbThy39PTk7a8vKyTU0dMDW8Z8jbVvJyvnhzUCkIyHQ8GyPy7/LEp6n7sqLa8chtef+ZTbG8Mn75gj1vgc7OOy8qW80cewZUfEwFTy
U/oCjyKW2D1lH642Mlf4JzpsZvNrxaU+pbUWuQevzxx+3ee+/t/7///vvt7rvvts2bN9tJJ51kZj+8ZvTJT37S/sf/+B9D9ffu3Wt33HGHnXHGGbZ+/Xrbu3evXXbZZfaLv/iL9vSnP70VL7jHHxVVOR2OtvmYU1Qvc2RZlJMBlcoCuM2obe4H6ymlYsfG965wSt6GSgAVZWhZBBcdx7FFj9CpocwAVTCQRcBRQOD1XS/VLjXWEzWG6FjED5fLHJ+qqzZxsKwzZ6V+M6+RzFVZ/h0FoUoGqNfqsWmRXBTQZjqAvPCcRvUjHjK7KOl3xmekM5E8FFhxeyUbdvn7+VGyvdYgdeedd9oZZ5zR/+/Xly666CK75pprzMzsE5/4hDVNY294wxuG6s/OztonPvEJe8973mNzc3N28skn22WXXTZwnaqW/GVv0a4pp1LEaaadOZ/LJlXVjSakpLBZJBMBATq+zHDR0ajdkdn4mD8V/SuAahNBlXhQ4+EdZ20iV9VuZIQZr1Ew4UaKrwbBa1MZAHGZbFzoGLm80h8/zkDFuoEPqo12Do6aNZSIdZL7zcjLoPwwI+V2ItDlYxngIK+ZzmfBcRsAZ2L9r5FXrxe/lTyTkfpvNnhdkHWSx1IdUDarqVWHiPbv328bN260H/uxHxt4n06tk40oEmYEAErgfK9KRqy47DBKSqYcUQRQvlSDTzTAT3QvWWZQEUjhOQWkkWxKURmTum+nJnL3T5Z5O9VmAhFI4be/PsOfCoGv0/BHGSlAUtdYFfV6vb49ePDGr/TANrycfxxIMXvCG4WdJ7/GxjsWFf9RkILyz3SGZazkje2pIIZ/R4CX8c7tRG6TnTKPT/EVBTuZn8n0rcZfMc/R8bY2mfkO/r+0tGTf+MY37NFHH7UNGzaEbY79s/vcqMzKke0o7UfRQ6RANZgfOblIcfE74o2VQ0VzkYIrx43n2sQxahwRryWgyogdYgSGbforyVi1p36jnBlsPKJnZ4Y36fL88dgUYXtoE1hPXUvCjAmP8YtElQ7UOuJI35h3RZEss7LclzpWEwCWzrFckMdMlyK/Eulp1G7JN/ENy21t2XllHhSpsdUEVTU01iDFLzksRVxYNjrHxNEjR39YDiP6TGGRRxV5KEcbUY2RqUiW63K0nmUmJUXH9mvu5F9JQJE5UebFv2v7y8pF0SrWY2eGG3vw2tTi4mIfwPD6II7P5RitGnhfvHyL9wNx5sgPp/X/6h6sTCYKTGuDNaxXahedrXq6Bv8vAYHSd+aLg6ymiZdpo/Eh7zw2xVdNmyWw8g8HnTxH2RgyoB6FIl5KNNYglVEpcihFPdhGVE8ZpQIEFV2UIrnViEIUL8wP85K1WxNJcXk2+MyBRP1FGaGK9JXxloKXUeagNI+Rg0PH4QDiAIbbvhGYIufPOqycID/5nHnmLeUs0xpHpnjL5FbSM5Rb5Nij4CkKCEs8qGBR2UrWbnSOx1NqL+KlBExOqF8MxOy/Mh+5WjSqj3M6IkCqTSRiNrwMoZS/dmkBMyi1xBLxG/2u7bdEStH5uP9Xzry23wzEsW4GHqqdyAllAFUaU+lYrdNRxzKQ5szYncjU1FSfd7y+hk92UN+ZXjE44Y5CxYdfC8NNBQoceYwlEM2Choxv1h9Vh7NztuMaoKqlTL8x48X+o3ZYH1R5BSrIR40e+kc9UcPMBrLy2puzs3Fl/7FfLveUyaRwUtQulVGihCwqZFDCb/9EDg95VdHcwY5okA/+rb7bRlocoZvFjjrih4mBCP/zvVEZ0GTRcQmgVotUtOtvzTWz/iYKl5//9u8S+Kv54mVCL+ffKEu8f0vxroCEHV9tNhOdr3HkNe3UOvWsnci58nm0+5ogVwGWKpPZacabelwU89U0TahXUZ+rSbWZt9mYg1Tm8JlqBFKTxdR8kD/+jqL3KHNoy6uKwtR5dSyKGksGq9pRYNembydlXDVZVNZe5ohrKAO4kg4xUOG4uF0OejLesX8GlIyv6BmHihcFJGp+eJyRHah+uf0S/1g+6y86p6gEHopfJYusLvOt/FhNIMXyVnX4GhqPr2QPowQLzF/bc0hjD1L+HW2JdVpploJr+x55ZjeTRg78UFLk+JBWI1oqOQFeFvHzmfPySA+JM9cafjJCABhl/KMGRQw6TXPgeX6+BMc7VmuChBJP2A7qqffrdhQ9VoztyetGUfFqyLSN3UYB4GpQBB7OY5bhZ+2pa4Z4Puo3C9CwrHryhs9z1o7it60PLYH2Uwak1GSa6Yv2GJliG0gsOP7PwDQKRZFoFpmuhErGmkVoNUo8alSKdTNDZeeoZJNFz4pUJBnpz0pJyYD/u9PwjRTowLBObbQeOTmWmXJsXIYj8IiiSD3LjDLelKwyMIyyiTYU6WsN6CHgR7wyMYgon1ar0+o87xbN5jqiLNhXGbvybTW8RjT2IKXe08MC8981EYMirJc93aBGoXASvT08V5v9RePNykaOV9WLwDPqOxpDjYxrHCW3n/XBkWI0xoiHWr6jdphKTtZs+JqR6whvgIgyd8UHP99Ofdc8jDkCqki2UR0+VksZgEVOvcYWV/O4z4V6DFNEKlDiMeH/NjxxZsdtN82Bp6CUtqkz6ET9RfVr+M1o7EGKJxTPmdVfI1AOXEXwqxFdZ9la28wJnbGKdNvyqxQseubZKFHZqHxFvEWZQynqP9gUBUpRObPBbMrBY3l52SYnJ+V7pNR9TQp8lCP0spkNcVnuo0bGo4BSRqqdDJhG6XfUOigjZdNK/iUeouCBdUr1yUEO1i0FcQrk2ti5CqZKDyaO6IgAqVKZLIMYxYmzsrAjqm0zAqe2GRTyUUsRUJaiZO4bjVNF2Yq3zIGsBFiidkdtsyTTyGFiv1lWouq4A0FAULJW8kZnWJNRtMk8SkGcqldjC7VzU3Loyi7aAtVqAqkae9v2a+cGif1IFFw4qaVJzgRL9l3Dx0porEHKKVoKwGORweC6f+mto6rdqE8/XqOotf0pKkXqkXOJIqeoHSflNP13liG2pSxrwt8qC8iWG/w4Gi9Hm6vlrJhqQA+3DPs3Xlfg5xXyb2yrRj/NBm/+NNMBykoyE2xrNWTLvETPJjyYlM2jCmRX0h62E/kTvmzghDeIs26pdlHveMNTzT1VpQxyFBprkFIRZUZcLnJgbZx0xptypBgNc381UbuKXqMxRefUd8RDZPglgFL84DhKUXk0D5hpRHNRAilsi8fORlziU42N/3sf0XhK7al7XVSEXMpsoj4QpGrHp+RdQ1lmzroaBZ+ZXZXAarUyO8VjxGdNf7VzpfpQGTt+Y7nagB7/Z76rDY0KVmMNUmaDk1yaAFUv+h1F72wEHIX4dxSxRK+nziYexxaNKwLcmnPqyQG1WWANUGV1Iv74eCTzCCxr5MkBDkabGV81Y4rqZ1khE4IGvibD+fRjJXBR5OV544TLI3qyRe2x2v6VrEuAjt8q+42ceYmX0jFl58gr2nyWlbOdcX/K7iJ9x2/UEWUHyidFY8XxsH61sXUeTxZARDTWIMXbPUuTkAm2FHUjYYSL/9nhIV9cP
...[base64-encoded PNG data truncated here: matplotlib figure from plt.imshow(img_list[1])]...",
+      "text/plain": [
+       "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "plt.imshow(img_list[1].squeeze().permute(1, 2, 0))" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.16 | packaged by conda-forge | (default, Feb 1 2023, 16:01:55) \n[GCC 11.3.0]" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "53427a73dce6cd561a14bc57d038a34300a2a6ca5e8afe9c5deb1771232f2ff8" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simclr/env.yml b/PuzzleTuning/Counterpart PreTrain Methods/simclr/env.yml new file mode 100644 index 0000000000000000000000000000000000000000..26a630558b418b486600b63eea7dba075104c182 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simclr/env.yml @@ -0,0 +1,21 @@ +name: simclr +channels: + - pytorch + - anaconda + - conda-forge + - defaults +dependencies: + - cudatoolkit=10.1 + - numpy=1.18.1 + - opencv=3.4.2 + - pillow=7.0 + - pip=20.0 + - python=3.7.6 + - pytorch=1.4.0 + - torchvision=0.5 + - tensorboard=2.1 + - matplotlib=3.1.3 + - scikit-learn=0.22.1 + - pyyaml=5.3.1 + - nvidia-apex=0.1 + diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simclr/exceptions/exceptions.py b/PuzzleTuning/Counterpart PreTrain Methods/simclr/exceptions/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..a7370841cd2d638c64d7f640809c024877848643 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simclr/exceptions/exceptions.py @@ -0,0 +1,10 @@ +class BaseSimCLRException(Exception): + """Base exception""" + + +class InvalidBackboneError(BaseSimCLRException): + """Raised when the choice of backbone Convnet is invalid.""" + + +class InvalidDatasetSelection(BaseSimCLRException): + """Raised when the choice of dataset is invalid.""" diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simclr/feature_eval/mini_batch_logistic_regression_evaluator.ipynb b/PuzzleTuning/Counterpart PreTrain Methods/simclr/feature_eval/mini_batch_logistic_regression_evaluator.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..8fb01077f967f27e11673bba6207e03617694252 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simclr/feature_eval/mini_batch_logistic_regression_evaluator.ipynb @@ -0,0 +1,821 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "kernelspec": { + "display_name": "pytorch", + "language": "python", + "name": "pytorch" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.6" + }, + "colab": { + "name": "Copy of mini-batch-logistic-regression-evaluator.ipynb", + "provenance": [], + "include_colab_link": true + }, + "accelerator": "GPU", + "widgets": { + "application/vnd.jupyter.widget-state+json": { + "149b9ce8fb68473a837a77431c12281a": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "state": { + "_view_name": "HBoxView", + "_dom_classes": [], + "_model_name": "HBoxModel", + "_view_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + 
"_view_count": null, + "_view_module_version": "1.5.0", + "box_style": "", + "layout": "IPY_MODEL_88cd3db2831e4c13a4a634709700d6b2", + "_model_module": "@jupyter-widgets/controls", + "children": [ + "IPY_MODEL_a88c31d74f5c40a2b24bcff5a35d216c", + "IPY_MODEL_60c6150177694717a622936b830427b5" + ] + } + }, + "88cd3db2831e4c13a4a634709700d6b2": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "state": { + "_view_name": "LayoutView", + "grid_template_rows": null, + "right": null, + "justify_content": null, + "_view_module": "@jupyter-widgets/base", + "overflow": null, + "_model_module_version": "1.2.0", + "_view_count": null, + "flex_flow": null, + "width": null, + "min_width": null, + "border": null, + "align_items": null, + "bottom": null, + "_model_module": "@jupyter-widgets/base", + "top": null, + "grid_column": null, + "overflow_y": null, + "overflow_x": null, + "grid_auto_flow": null, + "grid_area": null, + "grid_template_columns": null, + "flex": null, + "_model_name": "LayoutModel", + "justify_items": null, + "grid_row": null, + "max_height": null, + "align_content": null, + "visibility": null, + "align_self": null, + "height": null, + "min_height": null, + "padding": null, + "grid_auto_rows": null, + "grid_gap": null, + "max_width": null, + "order": null, + "_view_module_version": "1.2.0", + "grid_template_areas": null, + "object_position": null, + "object_fit": null, + "grid_auto_columns": null, + "margin": null, + "display": null, + "left": null + } + }, + "a88c31d74f5c40a2b24bcff5a35d216c": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "state": { + "_view_name": "ProgressView", + "style": "IPY_MODEL_dba019efadee4fdc8c799f309b9a7e70", + "_dom_classes": [], + "description": "", + "_model_name": "FloatProgressModel", + "bar_style": "info", + "max": 1, + "_view_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "value": 1, + "_view_count": null, + "_view_module_version": "1.5.0", + "orientation": "horizontal", + "min": 0, + "description_tooltip": null, + "_model_module": "@jupyter-widgets/controls", + "layout": "IPY_MODEL_5901c2829a554c8ebbd5926610088041" + } + }, + "60c6150177694717a622936b830427b5": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "state": { + "_view_name": "HTMLView", + "style": "IPY_MODEL_957362a11d174407979cf17012bf9208", + "_dom_classes": [], + "description": "", + "_model_name": "HTMLModel", + "placeholder": "​", + "_view_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "value": " 2640404480/? 
[00:51<00:00, 32685718.58it/s]", + "_view_count": null, + "_view_module_version": "1.5.0", + "description_tooltip": null, + "_model_module": "@jupyter-widgets/controls", + "layout": "IPY_MODEL_a4f82234388e4701a02a9f68a177193a" + } + }, + "dba019efadee4fdc8c799f309b9a7e70": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "state": { + "_view_name": "StyleView", + "_model_name": "ProgressStyleModel", + "description_width": "initial", + "_view_module": "@jupyter-widgets/base", + "_model_module_version": "1.5.0", + "_view_count": null, + "_view_module_version": "1.2.0", + "bar_color": null, + "_model_module": "@jupyter-widgets/controls" + } + }, + "5901c2829a554c8ebbd5926610088041": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "state": { + "_view_name": "LayoutView", + "grid_template_rows": null, + "right": null, + "justify_content": null, + "_view_module": "@jupyter-widgets/base", + "overflow": null, + "_model_module_version": "1.2.0", + "_view_count": null, + "flex_flow": null, + "width": null, + "min_width": null, + "border": null, + "align_items": null, + "bottom": null, + "_model_module": "@jupyter-widgets/base", + "top": null, + "grid_column": null, + "overflow_y": null, + "overflow_x": null, + "grid_auto_flow": null, + "grid_area": null, + "grid_template_columns": null, + "flex": null, + "_model_name": "LayoutModel", + "justify_items": null, + "grid_row": null, + "max_height": null, + "align_content": null, + "visibility": null, + "align_self": null, + "height": null, + "min_height": null, + "padding": null, + "grid_auto_rows": null, + "grid_gap": null, + "max_width": null, + "order": null, + "_view_module_version": "1.2.0", + "grid_template_areas": null, + "object_position": null, + "object_fit": null, + "grid_auto_columns": null, + "margin": null, + "display": null, + "left": null + } + }, + "957362a11d174407979cf17012bf9208": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "state": { + "_view_name": "StyleView", + "_model_name": "DescriptionStyleModel", + "description_width": "", + "_view_module": "@jupyter-widgets/base", + "_model_module_version": "1.5.0", + "_view_count": null, + "_view_module_version": "1.2.0", + "_model_module": "@jupyter-widgets/controls" + } + }, + "a4f82234388e4701a02a9f68a177193a": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "state": { + "_view_name": "LayoutView", + "grid_template_rows": null, + "right": null, + "justify_content": null, + "_view_module": "@jupyter-widgets/base", + "overflow": null, + "_model_module_version": "1.2.0", + "_view_count": null, + "flex_flow": null, + "width": null, + "min_width": null, + "border": null, + "align_items": null, + "bottom": null, + "_model_module": "@jupyter-widgets/base", + "top": null, + "grid_column": null, + "overflow_y": null, + "overflow_x": null, + "grid_auto_flow": null, + "grid_area": null, + "grid_template_columns": null, + "flex": null, + "_model_name": "LayoutModel", + "justify_items": null, + "grid_row": null, + "max_height": null, + "align_content": null, + "visibility": null, + "align_self": null, + "height": null, + "min_height": null, + "padding": null, + "grid_auto_rows": null, + "grid_gap": null, + "max_width": null, + "order": null, + "_view_module_version": "1.2.0", + "grid_template_areas": null, + "object_position": null, + "object_fit": null, + "grid_auto_columns": null, + "margin": null, + "display": null, + "left": null + } + } + } + } + }, + 
"cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "view-in-github", + "colab_type": "text" + }, + "source": [ + "\"Open" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "YUemQib7ZE4D" + }, + "source": [ + "import torch\n", + "import sys\n", + "import numpy as np\n", + "import os\n", + "import yaml\n", + "import matplotlib.pyplot as plt\n", + "import torchvision" + ], + "execution_count": 10, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "WSgRE1CcLqdS", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "48a2ae15-f672-495b-8d43-9a23b85fa3b8" + }, + "source": [ + "!pip install gdown" + ], + "execution_count": 11, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Requirement already satisfied: gdown in /usr/local/lib/python3.6/dist-packages (3.6.4)\n", + "Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from gdown) (1.15.0)\n", + "Requirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from gdown) (2.23.0)\n", + "Requirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from gdown) (4.41.1)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->gdown) (2020.12.5)\n", + "Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->gdown) (1.24.3)\n", + "Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->gdown) (3.0.4)\n", + "Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->gdown) (2.10)\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "NOIJEui1ZziV" + }, + "source": [ + "def get_file_id_by_model(folder_name):\n", + " file_id = {'resnet18_100-epochs_stl10': '14_nH2FkyKbt61cieQDiSbBVNP8-gtwgF',\n", + " 'resnet18_100-epochs_cifar10': '1lc2aoVtrAetGn0PnTkOyFzPCIucOJq7C',\n", + " 'resnet50_50-epochs_stl10': '1ByTKAUsdm_X7tLcii6oAEl5qFRqRMZSu'}\n", + " return file_id.get(folder_name, \"Model not found.\")" + ], + "execution_count": 12, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "G7YMxsvEZMrX", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "59475430-69d2-45a2-b61b-ae755d5d6e88" + }, + "source": [ + "folder_name = 'resnet50_50-epochs_stl10'\n", + "file_id = get_file_id_by_model(folder_name)\n", + "print(folder_name, file_id)" + ], + "execution_count": 13, + "outputs": [ + { + "output_type": "stream", + "text": [ + "resnet50_50-epochs_stl10 1ByTKAUsdm_X7tLcii6oAEl5qFRqRMZSu\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "PWZ8fet_YoJm", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "fbaeb858-221b-4d1b-dd90-001a6e713b75" + }, + "source": [ + "# download and extract model files\n", + "os.system('gdown https://drive.google.com/uc?id={}'.format(file_id))\n", + "os.system('unzip {}'.format(folder_name))\n", + "!ls" + ], + "execution_count": 14, + "outputs": [ + { + "output_type": "stream", + "text": [ + "checkpoint_0040.pth.tar\n", + "config.yml\n", + "events.out.tfevents.1610927742.4cb2c837708d.2694093.0\n", + "resnet50_50-epochs_stl10.zip\n", + "sample_data\n", + "training.log\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "3_nypQVEv-hn" + }, + "source": [ + "from torch.utils.data import DataLoader\n", + 
"import torchvision.transforms as transforms\n", + "from torchvision import datasets" + ], + "execution_count": 15, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "lDfbL3w_Z0Od", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "7532966e-1c4a-4641-c928-4cda14c53389" + }, + "source": [ + "device = 'cuda' if torch.cuda.is_available() else 'cpu'\n", + "print(\"Using device:\", device)" + ], + "execution_count": 16, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Using device: cuda\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "BfIPl0G6_RrT" + }, + "source": [ + "def get_stl10_data_loaders(download, shuffle=False, batch_size=256):\n", + " train_dataset = datasets.STL10('./data', split='train', download=download,\n", + " transform=transforms.ToTensor())\n", + "\n", + " train_loader = DataLoader(train_dataset, batch_size=batch_size,\n", + " num_workers=0, drop_last=False, shuffle=shuffle)\n", + " \n", + " test_dataset = datasets.STL10('./data', split='test', download=download,\n", + " transform=transforms.ToTensor())\n", + "\n", + " test_loader = DataLoader(test_dataset, batch_size=2*batch_size,\n", + " num_workers=10, drop_last=False, shuffle=shuffle)\n", + " return train_loader, test_loader\n", + "\n", + "def get_cifar10_data_loaders(download, shuffle=False, batch_size=256):\n", + " train_dataset = datasets.CIFAR10('./data', train=True, download=download,\n", + " transform=transforms.ToTensor())\n", + "\n", + " train_loader = DataLoader(train_dataset, batch_size=batch_size,\n", + " num_workers=0, drop_last=False, shuffle=shuffle)\n", + " \n", + " test_dataset = datasets.CIFAR10('./data', train=False, download=download,\n", + " transform=transforms.ToTensor())\n", + "\n", + " test_loader = DataLoader(test_dataset, batch_size=2*batch_size,\n", + " num_workers=10, drop_last=False, shuffle=shuffle)\n", + " return train_loader, test_loader" + ], + "execution_count": 17, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "6N8lYkbmDTaK" + }, + "source": [ + "with open(os.path.join('./config.yml')) as file:\n", + " config = yaml.load(file)" + ], + "execution_count": 18, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "a18lPD-tIle6" + }, + "source": [ + "if config.arch == 'resnet18':\n", + " model = torchvision.models.resnet18(pretrained=False, num_classes=10).to(device)\n", + "elif config.arch == 'resnet50':\n", + " model = torchvision.models.resnet50(pretrained=False, num_classes=10).to(device)" + ], + "execution_count": 19, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "4AIfgq41GuTT" + }, + "source": [ + "checkpoint = torch.load('checkpoint_0040.pth.tar', map_location=device)\n", + "state_dict = checkpoint['state_dict']\n", + "\n", + "for k in list(state_dict.keys()):\n", + "\n", + " if k.startswith('backbone.'):\n", + " if k.startswith('backbone') and not k.startswith('backbone.fc'):\n", + " # remove prefix\n", + " state_dict[k[len(\"backbone.\"):]] = state_dict[k]\n", + " del state_dict[k]" + ], + "execution_count": 21, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "VVjA83PPJYWl" + }, + "source": [ + "log = model.load_state_dict(state_dict, strict=False)\n", + "assert log.missing_keys == ['fc.weight', 'fc.bias']" + ], + "execution_count": 22, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "_GC0a14uWRr6", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 117, 
+ "referenced_widgets": [ + "149b9ce8fb68473a837a77431c12281a", + "88cd3db2831e4c13a4a634709700d6b2", + "a88c31d74f5c40a2b24bcff5a35d216c", + "60c6150177694717a622936b830427b5", + "dba019efadee4fdc8c799f309b9a7e70", + "5901c2829a554c8ebbd5926610088041", + "957362a11d174407979cf17012bf9208", + "a4f82234388e4701a02a9f68a177193a" + ] + }, + "outputId": "4c2558db-921c-425e-f947-6cc746d8c749" + }, + "source": [ + "if config.dataset_name == 'cifar10':\n", + " train_loader, test_loader = get_cifar10_data_loaders(download=True)\n", + "elif config.dataset_name == 'stl10':\n", + " train_loader, test_loader = get_stl10_data_loaders(download=True)\n", + "print(\"Dataset:\", config.dataset_name)" + ], + "execution_count": 23, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Downloading http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz to ./data/stl10_binary.tar.gz\n" + ], + "name": "stdout" + }, + { + "output_type": "display_data", + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "149b9ce8fb68473a837a77431c12281a", + "version_minor": 0, + "version_major": 2 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=1.0, bar_style='info', max=1.0), HTML(value='')))" + ] + }, + "metadata": { + "tags": [] + } + }, + { + "output_type": "stream", + "text": [ + "Extracting ./data/stl10_binary.tar.gz to ./data\n", + "Files already downloaded and verified\n", + "Dataset: stl10\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "pYT_KsM0Mnnr" + }, + "source": [ + "# freeze all layers but the last fc\n", + "for name, param in model.named_parameters():\n", + " if name not in ['fc.weight', 'fc.bias']:\n", + " param.requires_grad = False\n", + "\n", + "parameters = list(filter(lambda p: p.requires_grad, model.parameters()))\n", + "assert len(parameters) == 2 # fc.weight, fc.bias" + ], + "execution_count": 24, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "aPVh1S_eMRDU" + }, + "source": [ + "optimizer = torch.optim.Adam(model.parameters(), lr=3e-4, weight_decay=0.0008)\n", + "criterion = torch.nn.CrossEntropyLoss().to(device)" + ], + "execution_count": 25, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "edr6RhP2PdVq" + }, + "source": [ + "def accuracy(output, target, topk=(1,)):\n", + " \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n", + " with torch.no_grad():\n", + " maxk = max(topk)\n", + " batch_size = target.size(0)\n", + "\n", + " _, pred = output.topk(maxk, 1, True, True)\n", + " pred = pred.t()\n", + " correct = pred.eq(target.view(1, -1).expand_as(pred))\n", + "\n", + " res = []\n", + " for k in topk:\n", + " correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)\n", + " res.append(correct_k.mul_(100.0 / batch_size))\n", + " return res" + ], + "execution_count": 26, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "qOder0dAMI7X", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "5f723b91-5a5e-43eb-ca01-a9b5ae2f1346" + }, + "source": [ + "epochs = 100\n", + "for epoch in range(epochs):\n", + " top1_train_accuracy = 0\n", + " for counter, (x_batch, y_batch) in enumerate(train_loader):\n", + " x_batch = x_batch.to(device)\n", + " y_batch = y_batch.to(device)\n", + "\n", + " logits = model(x_batch)\n", + " loss = criterion(logits, y_batch)\n", + " top1 = accuracy(logits, y_batch, topk=(1,))\n", + " top1_train_accuracy += top1[0]\n", + "\n", + " optimizer.zero_grad()\n", + " 
loss.backward()\n", + " optimizer.step()\n", + "\n", + " top1_train_accuracy /= (counter + 1)\n", + " top1_accuracy = 0\n", + " top5_accuracy = 0\n", + " for counter, (x_batch, y_batch) in enumerate(test_loader):\n", + " x_batch = x_batch.to(device)\n", + " y_batch = y_batch.to(device)\n", + "\n", + " logits = model(x_batch)\n", + " \n", + " top1, top5 = accuracy(logits, y_batch, topk=(1,5))\n", + " top1_accuracy += top1[0]\n", + " top5_accuracy += top5[0]\n", + " \n", + " top1_accuracy /= (counter + 1)\n", + " top5_accuracy /= (counter + 1)\n", + " print(f\"Epoch {epoch}\\tTop1 Train accuracy {top1_train_accuracy.item()}\\tTop1 Test accuracy: {top1_accuracy.item()}\\tTop5 test acc: {top5_accuracy.item()}\")" + ], + "execution_count": 27, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Epoch 0\tTop1 Train accuracy 28.7109375\tTop1 Test accuracy: 43.75\tTop5 test acc: 93.837890625\n", + "Epoch 1\tTop1 Train accuracy 49.37959671020508\tTop1 Test accuracy: 52.8662109375\tTop5 test acc: 95.439453125\n", + "Epoch 2\tTop1 Train accuracy 55.257354736328125\tTop1 Test accuracy: 56.45263671875\tTop5 test acc: 95.91796875\n", + "Epoch 3\tTop1 Train accuracy 57.51838302612305\tTop1 Test accuracy: 57.39013671875\tTop5 test acc: 96.19384765625\n", + "Epoch 4\tTop1 Train accuracy 58.727020263671875\tTop1 Test accuracy: 58.2568359375\tTop5 test acc: 96.435546875\n", + "Epoch 5\tTop1 Train accuracy 59.677162170410156\tTop1 Test accuracy: 58.7353515625\tTop5 test acc: 96.50390625\n", + "Epoch 6\tTop1 Train accuracy 60.065486907958984\tTop1 Test accuracy: 59.17724609375\tTop5 test acc: 96.708984375\n", + "Epoch 7\tTop1 Train accuracy 60.612361907958984\tTop1 Test accuracy: 59.482421875\tTop5 test acc: 96.74560546875\n", + "Epoch 8\tTop1 Train accuracy 60.827205657958984\tTop1 Test accuracy: 59.66064453125\tTop5 test acc: 96.77490234375\n", + "Epoch 9\tTop1 Train accuracy 61.100643157958984\tTop1 Test accuracy: 60.09521484375\tTop5 test acc: 96.82373046875\n", + "Epoch 10\tTop1 Train accuracy 61.52803421020508\tTop1 Test accuracy: 60.3466796875\tTop5 test acc: 96.82861328125\n", + "Epoch 11\tTop1 Train accuracy 61.80147171020508\tTop1 Test accuracy: 60.6640625\tTop5 test acc: 96.8896484375\n", + "Epoch 12\tTop1 Train accuracy 62.09444046020508\tTop1 Test accuracy: 60.96435546875\tTop5 test acc: 96.99462890625\n", + "Epoch 13\tTop1 Train accuracy 62.541358947753906\tTop1 Test accuracy: 61.13037109375\tTop5 test acc: 97.0068359375\n", + "Epoch 14\tTop1 Train accuracy 62.853858947753906\tTop1 Test accuracy: 61.34033203125\tTop5 test acc: 97.01904296875\n", + "Epoch 15\tTop1 Train accuracy 62.951515197753906\tTop1 Test accuracy: 61.5673828125\tTop5 test acc: 96.99951171875\n", + "Epoch 16\tTop1 Train accuracy 63.400733947753906\tTop1 Test accuracy: 61.806640625\tTop5 test acc: 97.0361328125\n", + "Epoch 17\tTop1 Train accuracy 63.66958236694336\tTop1 Test accuracy: 61.98974609375\tTop5 test acc: 97.0849609375\n", + "Epoch 18\tTop1 Train accuracy 63.82583236694336\tTop1 Test accuracy: 62.265625\tTop5 test acc: 97.07275390625\n", + "Epoch 19\tTop1 Train accuracy 64.1187973022461\tTop1 Test accuracy: 62.412109375\tTop5 test acc: 97.09716796875\n", + "Epoch 20\tTop1 Train accuracy 64.2750473022461\tTop1 Test accuracy: 62.56591796875\tTop5 test acc: 97.12158203125\n", + "Epoch 21\tTop1 Train accuracy 64.4140625\tTop1 Test accuracy: 62.724609375\tTop5 test acc: 97.20703125\n", + "Epoch 22\tTop1 Train accuracy 64.53125\tTop1 Test accuracy: 62.90771484375\tTop5 test acc: 97.255859375\n", + "Epoch 23\tTop1 
Train accuracy 64.6484375\tTop1 Test accuracy: 62.95654296875\tTop5 test acc: 97.29248046875\n", + "Epoch 24\tTop1 Train accuracy 64.86328125\tTop1 Test accuracy: 63.12255859375\tTop5 test acc: 97.35595703125\n", + "Epoch 25\tTop1 Train accuracy 65.1344223022461\tTop1 Test accuracy: 63.330078125\tTop5 test acc: 97.40478515625\n", + "Epoch 26\tTop1 Train accuracy 65.3297348022461\tTop1 Test accuracy: 63.3984375\tTop5 test acc: 97.44873046875\n", + "Epoch 27\tTop1 Train accuracy 65.4469223022461\tTop1 Test accuracy: 63.34228515625\tTop5 test acc: 97.412109375\n", + "Epoch 28\tTop1 Train accuracy 65.6227035522461\tTop1 Test accuracy: 63.48876953125\tTop5 test acc: 97.412109375\n", + "Epoch 29\tTop1 Train accuracy 65.85478210449219\tTop1 Test accuracy: 63.56201171875\tTop5 test acc: 97.42431640625\n", + "Epoch 30\tTop1 Train accuracy 66.06732940673828\tTop1 Test accuracy: 63.67431640625\tTop5 test acc: 97.4560546875\n", + "Epoch 31\tTop1 Train accuracy 66.20404815673828\tTop1 Test accuracy: 63.80859375\tTop5 test acc: 97.48046875\n", + "Epoch 32\tTop1 Train accuracy 66.24080657958984\tTop1 Test accuracy: 63.92578125\tTop5 test acc: 97.5048828125\n", + "Epoch 33\tTop1 Train accuracy 66.58777618408203\tTop1 Test accuracy: 63.9990234375\tTop5 test acc: 97.529296875\n", + "Epoch 34\tTop1 Train accuracy 66.70496368408203\tTop1 Test accuracy: 64.1455078125\tTop5 test acc: 97.51708984375\n", + "Epoch 35\tTop1 Train accuracy 66.80261993408203\tTop1 Test accuracy: 64.20654296875\tTop5 test acc: 97.529296875\n", + "Epoch 36\tTop1 Train accuracy 66.91980743408203\tTop1 Test accuracy: 64.32861328125\tTop5 test acc: 97.51708984375\n", + "Epoch 37\tTop1 Train accuracy 66.93933868408203\tTop1 Test accuracy: 64.3896484375\tTop5 test acc: 97.51708984375\n", + "Epoch 38\tTop1 Train accuracy 66.97840118408203\tTop1 Test accuracy: 64.47021484375\tTop5 test acc: 97.529296875\n", + "Epoch 39\tTop1 Train accuracy 67.11282348632812\tTop1 Test accuracy: 64.53125\tTop5 test acc: 97.56591796875\n", + "Epoch 40\tTop1 Train accuracy 67.24954223632812\tTop1 Test accuracy: 64.6044921875\tTop5 test acc: 97.6025390625\n", + "Epoch 41\tTop1 Train accuracy 67.34949493408203\tTop1 Test accuracy: 64.62890625\tTop5 test acc: 97.59033203125\n", + "Epoch 42\tTop1 Train accuracy 67.42761993408203\tTop1 Test accuracy: 64.7265625\tTop5 test acc: 97.6025390625\n", + "Epoch 43\tTop1 Train accuracy 67.52527618408203\tTop1 Test accuracy: 64.84375\tTop5 test acc: 97.61474609375\n", + "Epoch 44\tTop1 Train accuracy 67.58386993408203\tTop1 Test accuracy: 64.87548828125\tTop5 test acc: 97.61474609375\n", + "Epoch 45\tTop1 Train accuracy 67.64246368408203\tTop1 Test accuracy: 64.9365234375\tTop5 test acc: 97.626953125\n", + "Epoch 46\tTop1 Train accuracy 67.75735473632812\tTop1 Test accuracy: 65.0341796875\tTop5 test acc: 97.66357421875\n", + "Epoch 47\tTop1 Train accuracy 67.85501098632812\tTop1 Test accuracy: 65.1318359375\tTop5 test acc: 97.7001953125\n", + "Epoch 48\tTop1 Train accuracy 67.89407348632812\tTop1 Test accuracy: 65.1318359375\tTop5 test acc: 97.73681640625\n", + "Epoch 49\tTop1 Train accuracy 67.95266723632812\tTop1 Test accuracy: 65.15625\tTop5 test acc: 97.73681640625\n", + "Epoch 50\tTop1 Train accuracy 68.01126098632812\tTop1 Test accuracy: 65.21728515625\tTop5 test acc: 97.76123046875\n", + "Epoch 51\tTop1 Train accuracy 68.05032348632812\tTop1 Test accuracy: 65.29052734375\tTop5 test acc: 97.7490234375\n", + "Epoch 52\tTop1 Train accuracy 68.05032348632812\tTop1 Test accuracy: 65.3564453125\tTop5 test acc: 
97.78564453125\n", + "Epoch 53\tTop1 Train accuracy 68.20657348632812\tTop1 Test accuracy: 65.3759765625\tTop5 test acc: 97.7978515625\n", + "Epoch 54\tTop1 Train accuracy 68.28469848632812\tTop1 Test accuracy: 65.45654296875\tTop5 test acc: 97.822265625\n", + "Epoch 55\tTop1 Train accuracy 68.41912078857422\tTop1 Test accuracy: 65.46875\tTop5 test acc: 97.8466796875\n", + "Epoch 56\tTop1 Train accuracy 68.45818328857422\tTop1 Test accuracy: 65.5615234375\tTop5 test acc: 97.85888671875\n", + "Epoch 57\tTop1 Train accuracy 68.61443328857422\tTop1 Test accuracy: 65.56640625\tTop5 test acc: 97.87109375\n", + "Epoch 58\tTop1 Train accuracy 68.71208953857422\tTop1 Test accuracy: 65.5859375\tTop5 test acc: 97.90771484375\n", + "Epoch 59\tTop1 Train accuracy 68.69255828857422\tTop1 Test accuracy: 65.64697265625\tTop5 test acc: 97.919921875\n", + "Epoch 60\tTop1 Train accuracy 68.80744934082031\tTop1 Test accuracy: 65.64697265625\tTop5 test acc: 97.93212890625\n", + "Epoch 61\tTop1 Train accuracy 68.94416809082031\tTop1 Test accuracy: 65.72021484375\tTop5 test acc: 97.93212890625\n", + "Epoch 62\tTop1 Train accuracy 69.04182434082031\tTop1 Test accuracy: 65.76904296875\tTop5 test acc: 97.919921875\n", + "Epoch 63\tTop1 Train accuracy 69.06135559082031\tTop1 Test accuracy: 65.84228515625\tTop5 test acc: 97.90771484375\n", + "Epoch 64\tTop1 Train accuracy 69.19807434082031\tTop1 Test accuracy: 65.93505859375\tTop5 test acc: 97.90771484375\n", + "Epoch 65\tTop1 Train accuracy 69.23713684082031\tTop1 Test accuracy: 65.95947265625\tTop5 test acc: 97.9150390625\n", + "Epoch 66\tTop1 Train accuracy 69.25666809082031\tTop1 Test accuracy: 66.0888671875\tTop5 test acc: 97.939453125\n", + "Epoch 67\tTop1 Train accuracy 69.31526184082031\tTop1 Test accuracy: 66.02783203125\tTop5 test acc: 97.939453125\n", + "Epoch 68\tTop1 Train accuracy 69.43014526367188\tTop1 Test accuracy: 66.07666015625\tTop5 test acc: 97.9638671875\n", + "Epoch 69\tTop1 Train accuracy 69.48873901367188\tTop1 Test accuracy: 66.12060546875\tTop5 test acc: 97.9638671875\n", + "Epoch 70\tTop1 Train accuracy 69.50827026367188\tTop1 Test accuracy: 66.083984375\tTop5 test acc: 97.95166015625\n", + "Epoch 71\tTop1 Train accuracy 69.60592651367188\tTop1 Test accuracy: 66.1572265625\tTop5 test acc: 97.9638671875\n", + "Epoch 72\tTop1 Train accuracy 69.68635559082031\tTop1 Test accuracy: 66.2060546875\tTop5 test acc: 97.95166015625\n", + "Epoch 73\tTop1 Train accuracy 69.78170776367188\tTop1 Test accuracy: 66.2744140625\tTop5 test acc: 97.92724609375\n", + "Epoch 74\tTop1 Train accuracy 69.84030151367188\tTop1 Test accuracy: 66.31591796875\tTop5 test acc: 97.92724609375\n", + "Epoch 75\tTop1 Train accuracy 69.89889526367188\tTop1 Test accuracy: 66.328125\tTop5 test acc: 97.9150390625\n", + "Epoch 76\tTop1 Train accuracy 69.93795776367188\tTop1 Test accuracy: 66.41357421875\tTop5 test acc: 97.92724609375\n", + "Epoch 77\tTop1 Train accuracy 69.95748901367188\tTop1 Test accuracy: 66.41357421875\tTop5 test acc: 97.9150390625\n", + "Epoch 78\tTop1 Train accuracy 70.01608276367188\tTop1 Test accuracy: 66.474609375\tTop5 test acc: 97.9150390625\n", + "Epoch 79\tTop1 Train accuracy 69.99655151367188\tTop1 Test accuracy: 66.53564453125\tTop5 test acc: 97.939453125\n", + "Epoch 80\tTop1 Train accuracy 70.01608276367188\tTop1 Test accuracy: 66.56005859375\tTop5 test acc: 97.939453125\n", + "Epoch 81\tTop1 Train accuracy 70.09420776367188\tTop1 Test accuracy: 66.56494140625\tTop5 test acc: 97.939453125\n", + "Epoch 82\tTop1 Train accuracy 
70.11373901367188\tTop1 Test accuracy: 66.650390625\tTop5 test acc: 97.939453125\n", + "Epoch 83\tTop1 Train accuracy 70.19186401367188\tTop1 Test accuracy: 66.71142578125\tTop5 test acc: 97.92724609375\n", + "Epoch 84\tTop1 Train accuracy 70.26998901367188\tTop1 Test accuracy: 66.7236328125\tTop5 test acc: 97.90283203125\n", + "Epoch 85\tTop1 Train accuracy 70.32858276367188\tTop1 Test accuracy: 66.73583984375\tTop5 test acc: 97.90283203125\n", + "Epoch 86\tTop1 Train accuracy 70.32858276367188\tTop1 Test accuracy: 66.748046875\tTop5 test acc: 97.890625\n", + "Epoch 87\tTop1 Train accuracy 70.46530151367188\tTop1 Test accuracy: 66.7724609375\tTop5 test acc: 97.890625\n", + "Epoch 88\tTop1 Train accuracy 70.52389526367188\tTop1 Test accuracy: 66.78466796875\tTop5 test acc: 97.90283203125\n", + "Epoch 89\tTop1 Train accuracy 70.56295776367188\tTop1 Test accuracy: 66.78466796875\tTop5 test acc: 97.890625\n", + "Epoch 90\tTop1 Train accuracy 70.68014526367188\tTop1 Test accuracy: 66.83349609375\tTop5 test acc: 97.87841796875\n", + "Epoch 91\tTop1 Train accuracy 70.77780151367188\tTop1 Test accuracy: 66.826171875\tTop5 test acc: 97.87841796875\n", + "Epoch 92\tTop1 Train accuracy 70.81686401367188\tTop1 Test accuracy: 66.88720703125\tTop5 test acc: 97.87841796875\n", + "Epoch 93\tTop1 Train accuracy 70.85592651367188\tTop1 Test accuracy: 66.8994140625\tTop5 test acc: 97.87841796875\n", + "Epoch 94\tTop1 Train accuracy 70.91452026367188\tTop1 Test accuracy: 66.9482421875\tTop5 test acc: 97.890625\n", + "Epoch 95\tTop1 Train accuracy 71.03170776367188\tTop1 Test accuracy: 66.98486328125\tTop5 test acc: 97.890625\n", + "Epoch 96\tTop1 Train accuracy 71.09030151367188\tTop1 Test accuracy: 67.001953125\tTop5 test acc: 97.91015625\n", + "Epoch 97\tTop1 Train accuracy 71.09030151367188\tTop1 Test accuracy: 67.0263671875\tTop5 test acc: 97.91015625\n", + "Epoch 98\tTop1 Train accuracy 71.12936401367188\tTop1 Test accuracy: 67.06298828125\tTop5 test acc: 97.89794921875\n", + "Epoch 99\tTop1 Train accuracy 71.12936401367188\tTop1 Test accuracy: 67.0751953125\tTop5 test acc: 97.8857421875\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "dtYqHZirMNZk" + }, + "source": [ + "" + ], + "execution_count": 27, + "outputs": [] + } + ] +} \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simclr/load_vit_from_ckpt.py b/PuzzleTuning/Counterpart PreTrain Methods/simclr/load_vit_from_ckpt.py new file mode 100644 index 0000000000000000000000000000000000000000..eae311f5f0a67864f435a4f6495ae1bdb269f612 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simclr/load_vit_from_ckpt.py @@ -0,0 +1,122 @@ +""" +Extracting backbone from a specified SimCLR checkpoint. 
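+
+The checkpoint is assumed to follow this repo's SimCLR convention: encoder
+tensors are stored under a "module.backbone." prefix ("module." typically from
+the DataParallel wrapper, "backbone." from the ViTSimCLR attribute). main()
+below strips that prefix, copies each matching tensor into a plain timm
+vit_base_patch16_224 state dict, and skips keys such as the projection head
+that have no ViT counterpart.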
+
+Example:
+
+python load_vit_from_ckpt.py \
+    --checkpoint ./runs/Aug13_10-31-32_lsq/checkpoint_0016.pth.tar \
+    --save-to ./output \
+    --save-name vit_simclr_16_224.pth \
+    --num-classes 2
+"""
+
+import torchvision
+import torch
+import os
+import argparse
+from timm import create_model
+# from net.models.vit import VisionTransformer
+
+
+def gen_basic_weight(save_dir):
+    # Load timm vit weight: build the same ViT-B/16 twice to export both a
+    # random-init and an ImageNet-pretrained state dict
+    model = create_model('vit_base_patch16_224', pretrained=False, in_chans=3)
+    random_state_dict = model.state_dict()
+
+    model = create_model('vit_base_patch16_224', pretrained=True, in_chans=3)
+    pretrained_state_dict = model.state_dict()
+
+    # Save model
+    print(f'Saving backbone init weight to {save_dir}...')
+    if not os.path.exists(save_dir):
+        os.makedirs(save_dir)
+    torch.save(random_state_dict, os.path.join(save_dir, 'ViT_b16_224_Random_Init.pth'))
+    torch.save(pretrained_state_dict, os.path.join(save_dir, 'ViT_b16_224_Imagenet.pth'))
+
+
+def main(args):
+    """Read ViT parameters from the SimCLR backbone checkpoint
+    """
+
+    # Initialize model
+    if args.basic_weight:
+        model = create_model('vit_base_patch16_224', pretrained=False, in_chans=3)
+        # model = VisionTransformer(num_classes=args.num_classes)
+
+        # Load basic weights (default initial parameters)
+        basic_weight = torch.load(args.basic_weight)
+        model.load_state_dict(basic_weight, strict=False)
+    else:
+        # No basic weight given: fall back to the timm ImageNet-pretrained ViT
+        model = create_model('vit_base_patch16_224', pretrained=True, in_chans=3)
+
+    # Load checkpoint
+    # state_dict = torch.load(args.checkpoint)['state_dict']
+    checkpoint = torch.load(args.checkpoint)
+    ckp_state_dict = checkpoint['state_dict']
+    model_state_dict = model.state_dict()
+
+    # Report encoder tensors in the checkpoint that have no counterpart in the
+    # plain ViT state dict (e.g. the projection head)
+    print('checking checkpoint weights...')
+    len_state_dict = len(ckp_state_dict)
+    for seq, src_k in enumerate(ckp_state_dict.keys()):
+        if "module.backbone." in src_k:
+            tgt_k = str(src_k).replace("module.backbone.", "")
+            if tgt_k not in model_state_dict.keys():
+                print(f'{seq+1}/{len_state_dict} Skipped: {src_k}, {ckp_state_dict[src_k].shape}')
+
+    # Copy every ViT tensor whose "module.backbone."-prefixed name exists in
+    # the checkpoint
+    print('loading weights...')
+    len_state_dict = len(model_state_dict)
+    for seq, tgt_k in enumerate(model_state_dict.keys()):
+        src_k = "module.backbone."
+ str(tgt_k) + if src_k in ckp_state_dict: + model_state_dict[tgt_k] = ckp_state_dict[src_k] + else: + print(f'{seq+1}/{len_state_dict} Skipped: {tgt_k}') + + model.load_state_dict(model_state_dict, strict=False) + + # Save model + print(f'Saving model to {args.save_to}...') + if not os.path.exists(args.save_to): + os.makedirs(args.save_to) + torch.save(model.state_dict(), os.path.join(args.save_to, args.save_name)) + + +def get_args_parser(): + """Input parameters + """ + parser = argparse.ArgumentParser(description='Extract backbone state dict') + parser.add_argument('--checkpoint', default='./checkpoint_0004.pth.tar', type=str, required=True, + help='Path to the checkpoint') + parser.add_argument('--save-to', default='./output', type=str, required=True, + help='Where to save the model') + parser.add_argument('--save-name', default='vit_simclr_16_224.pth', type=str, required=True, + help='Model save name') + parser.add_argument('--num-classes', default=2, type=int, + help='Number of classes to be classified') + parser.add_argument('--random-seed', default=42, type=int, + help='Random seed (enable reproduction)') + parser.add_argument('--basic-weight', default='', type=str, + help='Basic weight (used to init parameters)') + return parser + + +def setup_seed(seed): + """Fix up the random seed + + Args: + seed (int): Seed to be applied + """ + import random + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + random.seed(seed) + torch.backends.cudnn.deterministic = True + + +if __name__ == '__main__': + parser = get_args_parser() + args = parser.parse_args() + + setup_seed(args.random_seed) + main(args) \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simclr/models/__pycache__/resnet_simclr.cpython-38.pyc b/PuzzleTuning/Counterpart PreTrain Methods/simclr/models/__pycache__/resnet_simclr.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f6ba59f4f9d872588a736ce381a9475418e73c3 Binary files /dev/null and b/PuzzleTuning/Counterpart PreTrain Methods/simclr/models/__pycache__/resnet_simclr.cpython-38.pyc differ diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simclr/models/resnet_simclr.py b/PuzzleTuning/Counterpart PreTrain Methods/simclr/models/resnet_simclr.py new file mode 100644 index 0000000000000000000000000000000000000000..08f09ef7771ed5862cf3cd46980c23265aaed97b --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simclr/models/resnet_simclr.py @@ -0,0 +1,70 @@ +import torch.nn as nn +import torchvision.models as models + +from exceptions.exceptions import InvalidBackboneError + +from timm import create_model +import torch +import logging + + +class ResNetSimCLR(nn.Module): + + def __init__(self, base_model, out_dim): + super(ResNetSimCLR, self).__init__() + self.resnet_dict = {"resnet18": models.resnet18(pretrained=False, num_classes=out_dim), + "resnet50": models.resnet50(pretrained=False, num_classes=out_dim)} + + self.backbone = self._get_basemodel(base_model) + dim_mlp = self.backbone.fc.in_features + + # add mlp projection head + self.backbone.fc = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.backbone.fc) + + def _get_basemodel(self, model_name): + try: + model = self.resnet_dict[model_name] + except KeyError: + raise InvalidBackboneError( + "Invalid backbone architecture. 
Check the config file and pass one of: resnet18 or resnet50") + else: + return model + + def forward(self, x): + return self.backbone(x) + + +class ViTSimCLR(nn.Module): + + def __init__(self, base_model, out_dim, load_weight=None): + super(ViTSimCLR, self).__init__() + + # logging.info("=> preparing for backbone model '{}'".format(args.model)) + # backbone_model = create_model('vit_base_patch16_224', pretrained=args.pretrained, in_chans=3) + # if args.model_weights: + # model_weights = torch.load(args.model_weights) + # backbone_model.load_state_dict(model_weights, strict=True) + # logging.info(f"Loaded weights from: {args.model_weights}") + + assert 'vit' in base_model + backbone_model = create_model(base_model, pretrained=True, in_chans=3, num_classes=out_dim) + + # if load_weight: + # model_weights = torch.load(load_weight)['state_dict'] + # updated_weights = {key: value for key, value in model_weights.items() if not key.startswith('head')} + # backbone_model.load_state_dict(updated_weights, strict=False) + # optimizer.load_state_dict(checkpoint['optimizer']) + # lr_scheduler.load_state_dict(checkpoint['lr_scheduler']) + # print(f"Loaded weights from: {load_weight}") + # logging.info(f"Loaded weights from: {load_weight}") + + self.backbone = backbone_model + + dim_mlp = self.backbone.head.in_features + + # add mlp projection head + self.backbone.head = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.backbone.head) + + + def forward(self, x): + return self.backbone(x) diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simclr/pretrain.sh b/PuzzleTuning/Counterpart PreTrain Methods/simclr/pretrain.sh new file mode 100644 index 0000000000000000000000000000000000000000..e04017ba87adb04a0978cf35bcafc7d3c265a4e5 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simclr/pretrain.sh @@ -0,0 +1,41 @@ +#!/bin/bash +# ps -ef | grep simclr | awk '{print $2}' |xargs kill + +# Training settings +pretrain_model="timm" +dataset="All" +model_weights="/home/pancreatic-cancer-diagnosis-tansformer/saved_models/ViT_b16_224_Imagenet.pth" + +# Init params +data_path="/root/autodl-tmp/datasets/${dataset}" +model_name="ViT_b16_224_timm_SIMCLR_ALL_100.pth" +checkpoint_path="/root/autodl-tmp/LSQ-simclr/checkpoint/${pretrain_model}" +save_weight_path="/root/autodl-tmp/LSQ-simclr/model_saved/" +tensorboard_path="/root/tf-logs/" + +# Training. Save checkpoint every 20 epochs. +# The checkpoint and backbone model will be available under checkpoint_path folder. 
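+#
+# A minimal sketch for pulling out an intermediate backbone instead of the
+# final one (the epoch index is an assumption based on the 20-epoch
+# checkpoint interval noted above; flags mirror the extraction call below):
+#
+#   python -u load_vit_from_ckpt.py \
+#       --basic-weight ${model_weights} \
+#       --checkpoint ${checkpoint_path}/checkpoint_0060.pth.tar \
+#       --save-to ${save_weight_path} \
+#       --save-name ViT_b16_224_timm_SIMCLR_ALL_60.pth \
+#       --num-classes 2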
+set -e + +python -u run_vit.py \ + --data $data_path \ + --dataset-name "cpia-mini" \ + --output_dir $checkpoint_path \ + --log_dir $tensorboard_path \ + --arch vit_base_patch16_224 \ + --batch_size 512 \ + --epochs 100 \ + --seed 42 \ + --fp16-precision \ + --init_weight_pth $model_weights \ + --enable_notify + +# extract & save model +python -u load_vit_from_ckpt.py \ + --basic-weight ${model_weights} \ + --checkpoint ${checkpoint_path}/checkpoint_0100.pth.tar \ + --save-to $save_weight_path \ + --save-name $model_name \ + --num-classes 2 + +set +e \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simclr/reproduce_env/requirements.txt b/PuzzleTuning/Counterpart PreTrain Methods/simclr/reproduce_env/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..2722afcef58cc96f375503bfb854f7e9b9501915 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simclr/reproduce_env/requirements.txt @@ -0,0 +1,8 @@ +numpy==1.23.0 +numpy==1.23.5 +PyYAML==6.0 +PyYAML==6.0.1 +timm==0.6.12 +torch==2.0.0 +torchvision==0.15.0 +tqdm==4.65.0 diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simclr/reproduce_env/simmim.yaml b/PuzzleTuning/Counterpart PreTrain Methods/simclr/reproduce_env/simmim.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a2abc17703b80fa839d606bf33a53c7ac1566bb9 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simclr/reproduce_env/simmim.yaml @@ -0,0 +1,220 @@ +name: SimMIM +channels: + - pytorch + - nvidia + - https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/conda-forge/ + - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main/ + - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/ + - defaults +dependencies: + - _libgcc_mutex=0.1=main + - _openmp_mutex=5.1=1_gnu + - asttokens=2.0.5=pyhd3eb1b0_0 + - backcall=0.2.0=pyhd3eb1b0_0 + - blas=1.0=mkl + - brotlipy=0.7.0=py39h27cfd23_1003 + - bzip2=1.0.8=h7b6447c_0 + - ca-certificates=2023.01.10=h06a4308_0 + - certifi=2022.12.7=py39h06a4308_0 + - cffi=1.15.1=py39h5eee18b_3 + - charset-normalizer=2.0.4=pyhd3eb1b0_0 + - comm=0.1.2=py39h06a4308_0 + - cryptography=39.0.1=py39h9ce1e76_0 + - cuda-cudart=11.7.99=0 + - cuda-cupti=11.7.101=0 + - cuda-libraries=11.7.1=0 + - cuda-nvrtc=11.7.99=0 + - cuda-nvtx=11.7.91=0 + - cuda-runtime=11.7.1=0 + - debugpy=1.5.1=py39h295c915_0 + - decorator=5.1.1=pyhd3eb1b0_0 + - entrypoints=0.4=py39h06a4308_0 + - executing=0.8.3=pyhd3eb1b0_0 + - ffmpeg=4.2.2=h20bf706_0 + - filelock=3.9.0=py39h06a4308_0 + - flit-core=3.8.0=py39h06a4308_0 + - freetype=2.12.1=h4a9f257_0 + - giflib=5.2.1=h5eee18b_3 + - gmp=6.2.1=h295c915_3 + - gmpy2=2.1.2=py39heeb90bb_0 + - gnutls=3.6.15=he1e5248_0 + - idna=3.4=py39h06a4308_0 + - ipykernel=6.19.2=py39hb070fc8_0 + - ipython=8.10.0=py39h06a4308_0 + - jedi=0.18.1=py39h06a4308_1 + - jinja2=3.1.2=py39h06a4308_0 + - jpeg=9e=h5eee18b_1 + - jupyter_client=7.4.9=py39h06a4308_0 + - jupyter_core=5.2.0=py39h06a4308_0 + - lame=3.100=h7b6447c_0 + - lcms2=2.12=h3be6417_0 + - ld_impl_linux-64=2.38=h1181459_1 + - lerc=3.0=h295c915_0 + - libcublas=11.10.3.66=0 + - libcufft=10.7.2.124=h4fbf590_0 + - libcufile=1.6.0.25=0 + - libcurand=10.3.2.56=0 + - libcusolver=11.4.0.1=0 + - libcusparse=11.7.4.91=0 + - libdeflate=1.17=h5eee18b_0 + - libffi=3.4.2=h6a678d5_6 + - libgcc-ng=11.2.0=h1234567_1 + - libgomp=11.2.0=h1234567_1 + - libidn2=2.3.2=h7f8727e_0 + - libnpp=11.7.4.75=0 + - libnvjpeg=11.8.0.2=0 + - libopus=1.3.1=h7b6447c_0 + - libpng=1.6.39=h5eee18b_0 + - libsodium=1.0.18=h7b6447c_0 + - 
libstdcxx-ng=11.2.0=h1234567_1 + - libtasn1=4.16.0=h27cfd23_0 + - libtiff=4.5.0=h6a678d5_2 + - libunistring=0.9.10=h27cfd23_0 + - libvpx=1.7.0=h439df22_0 + - libwebp=1.2.4=h11a3e52_1 + - libwebp-base=1.2.4=h5eee18b_1 + - lz4-c=1.9.4=h6a678d5_0 + - markupsafe=2.1.1=py39h7f8727e_0 + - matplotlib-inline=0.1.6=py39h06a4308_0 + - mkl-service=2.4.0=py39h7f8727e_0 + - mkl_fft=1.3.1=py39hd3c417c_0 + - mkl_random=1.2.2=py39h51133e4_0 + - mpc=1.1.0=h10f8cd9_1 + - mpfr=4.0.2=hb69a4c5_1 + - mpmath=1.2.1=py39h06a4308_0 + - ncurses=6.4=h6a678d5_0 + - nest-asyncio=1.5.6=py39h06a4308_0 + - nettle=3.7.3=hbbd107a_1 + - networkx=2.8.4=py39h06a4308_1 + - numpy-base=1.23.5=py39h31eccc5_0 + - openh264=2.1.1=h4ff587b_0 + - openssl=1.1.1t=h7f8727e_0 + - packaging=23.0=py39h06a4308_0 + - parso=0.8.3=pyhd3eb1b0_0 + - pexpect=4.8.0=pyhd3eb1b0_3 + - pickleshare=0.7.5=pyhd3eb1b0_1003 + - pillow=9.4.0=py39h6a678d5_0 + - pip=23.0.1=py39h06a4308_0 + - platformdirs=2.5.2=py39h06a4308_0 + - prompt-toolkit=3.0.36=py39h06a4308_0 + - psutil=5.9.0=py39h5eee18b_0 + - ptyprocess=0.7.0=pyhd3eb1b0_2 + - pure_eval=0.2.2=pyhd3eb1b0_0 + - pycparser=2.21=pyhd3eb1b0_0 + - pygments=2.11.2=pyhd3eb1b0_0 + - pyopenssl=23.0.0=py39h06a4308_0 + - pysocks=1.7.1=py39h06a4308_0 + - python=3.9.16=h7a1cb2a_2 + - python-dateutil=2.8.2=pyhd3eb1b0_0 + - pytorch=2.0.0=py3.9_cuda11.7_cudnn8.5.0_0 + - pytorch-cuda=11.7=h778d358_3 + - pytorch-mutex=1.0=cuda + - pyzmq=23.2.0=py39h6a678d5_0 + - readline=8.2=h5eee18b_0 + - requests=2.28.1=py39h06a4308_1 + - setuptools=65.6.3=py39h06a4308_0 + - six=1.16.0=pyhd3eb1b0_1 + - sqlite=3.41.1=h5eee18b_0 + - stack_data=0.2.0=pyhd3eb1b0_0 + - sympy=1.11.1=py39h06a4308_0 + - tk=8.6.12=h1ccaba5_0 + - torchaudio=2.0.0=py39_cu117 + - torchtriton=2.0.0=py39 + - torchvision=0.15.0=py39_cu117 + - tornado=6.2=py39h5eee18b_0 + - traitlets=5.7.1=py39h06a4308_0 + - typing_extensions=4.4.0=py39h06a4308_0 + - tzdata=2022g=h04d1e81_0 + - urllib3=1.26.14=py39h06a4308_0 + - wcwidth=0.2.5=pyhd3eb1b0_0 + - wheel=0.38.4=py39h06a4308_0 + - x264=1!157.20191217=h7b6447c_0 + - xz=5.2.10=h5eee18b_1 + - zeromq=4.3.4=h2531618_0 + - zlib=1.2.13=h5eee18b_0 + - zstd=1.5.2=ha4553b6_0 + - pip: + - absl-py==1.4.0 + - aiohttp==3.8.4 + - aiosignal==1.3.1 + - appdirs==1.4.4 + - async-timeout==4.0.2 + - attrs==23.1.0 + - cachetools==5.3.0 + - click==8.1.3 + - contourpy==1.0.7 + - cycler==0.11.0 + - cython==0.29.33 + - dnspython==2.3.0 + - docker-pycreds==0.4.0 + - einops==0.6.0 + - eventlet==0.33.3 + - fonttools==4.39.2 + - frozenlist==1.3.3 + - fsspec==2023.4.0 + - gco-wrapper==3.0.8 + - gitdb==4.0.10 + - gitpython==3.1.31 + - google-auth==2.16.2 + - google-auth-oauthlib==0.4.6 + - greenlet==2.0.2 + - grpcio==1.51.3 + - histolab==0.6.0 + - huggingface-hub==0.13.2 + - imageio==2.26.0 + - importlib-metadata==6.1.0 + - importlib-resources==5.12.0 + - intel-openmp==2023.0.0 + - joblib==1.2.0 + - kaggle==1.5.13 + - kiwisolver==1.4.4 + - lightning-utilities==0.8.0 + - markdown==3.4.1 + - matplotlib==3.7.1 + - mkl==2023.0.0 + - multidict==6.0.4 + - notifyemail==1.0.2 + - numpy==1.23.0 + - oauthlib==3.2.2 + - opencv-contrib-python==4.7.0.72 + - opencv-python==4.7.0.72 + - openslide-python==1.2.0 + - pandas==1.5.3 + - pathtools==0.1.2 + - protobuf==4.22.1 + - pyasn1==0.4.8 + - pyasn1-modules==0.2.8 + - pyparsing==3.0.9 + - python-slugify==8.0.1 + - pytorch-lightning==2.0.2 + - pytz==2022.7.1 + - pywavelets==1.4.1 + - pyyaml==6.0 + - requests-oauthlib==1.3.1 + - rsa==4.9 + - scikit-image==0.19.3 + - scikit-learn==1.2.2 + - scipy==1.8.1 + - sentry-sdk==1.17.0 + 
- setproctitle==1.3.2 + - smmap==5.0.0 + - spams==2.6.5.4 + - staintools==2.1.2 + - tbb==2021.8.0 + - tensorboard==2.12.0 + - tensorboard-data-server==0.7.0 + - tensorboard-plugin-wit==1.8.1 + - termcolor==2.3.0 + - text-unidecode==1.3 + - threadpool==1.3.2 + - threadpoolctl==3.1.0 + - tifffile==2023.3.15 + - timm==0.6.12 + - torchmetrics==0.11.4 + - tqdm==4.65.0 + - wandb==0.14.0 + - werkzeug==2.2.3 + - yacs==0.1.8 + - yarl==1.9.2 + - zipp==3.15.0 +prefix: /root/miniconda3/envs/SimMIM diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simclr/requirements.txt b/PuzzleTuning/Counterpart PreTrain Methods/simclr/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..4231406fdab2ef741f492265e308aa05aeb3b6f4 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simclr/requirements.txt @@ -0,0 +1,107 @@ +# This file may be used to create an environment using: +# $ conda create --name --file +# platform: linux-64 +_libgcc_mutex=0.1=main +absl-py=0.9.0=pypi_0 +blas=1.0=mkl +bzip2=1.0.8=h516909a_2 +ca-certificates=2019.11.28=hecc5488_0 +cachetools=4.0.0=pypi_0 +cairo=1.14.12=h80bd089_1005 +certifi=2019.11.28=py37hc8dfbb8_1 +chardet=3.0.4=pypi_0 +cudatoolkit=10.1.243=h6bb024c_0 +ffmpeg=4.0.2=ha0c5888_2 +fontconfig=2.13.1=he4413a7_1000 +freeglut=3.0.0=hf484d3e_1005 +freetype=2.9.1=h8a8886c_1 +gettext=0.19.8.1=hc5be6a0_1002 +glib=2.56.2=had28632_1001 +gmp=6.1.2=hf484d3e_1000 +gnutls=3.5.19=h2a4e5f8_1 +google-auth=1.11.3=pypi_0 +google-auth-oauthlib=0.4.1=pypi_0 +graphite2=1.3.13=hf484d3e_1000 +grpcio=1.27.2=pypi_0 +harfbuzz=1.9.0=he243708_1001 +hdf5=1.10.2=hc401514_3 +icu=58.2=hf484d3e_1000 +idna=2.9=pypi_0 +intel-openmp=2020.0=166 +jasper=2.0.14=h07fcdf6_1 +jpeg=9b=h024ee3a_2 +ld_impl_linux-64=2.33.1=h53a641e_7 +libedit=3.1.20181209=hc058e9b_0 +libffi=3.2.1=hd88cf55_4 +libgcc-ng=9.1.0=hdf63c60_0 +libgfortran=3.0.0=1 +libgfortran-ng=7.3.0=hdf63c60_0 +libglu=9.0.0=hf484d3e_1000 +libiconv=1.15=h516909a_1005 +libopencv=3.4.2=hb342d67_1 +libpng=1.6.37=hbc83047_0 +libstdcxx-ng=9.1.0=hdf63c60_0 +libtiff=4.1.0=h2733197_0 +libuuid=2.32.1=h14c3975_1000 +libxcb=1.13=h14c3975_1002 +libxml2=2.9.9=h13577e0_2 +markdown=3.2.1=pypi_0 +mkl=2020.0=166 +mkl-service=2.3.0=py37he904b0f_0 +mkl_fft=1.0.15=py37ha843d7b_0 +mkl_random=1.1.0=py37hd6b4f25_0 +ncurses=6.2=he6710b0_0 +nettle=3.3=0 +ninja=1.9.0=py37hfd86e86_0 +numpy=1.18.1=py37h4f9e942_0 +numpy-base=1.18.1=py37hde5b4d6_1 +oauthlib=3.1.0=pypi_0 +olefile=0.46=py37_0 +opencv=3.4.2=py37h6fd60c2_1 +openh264=1.8.0=hdbcaa40_1000 +openssl=1.1.1d=h516909a_0 +pcre=8.44=he1b5a44_0 +pillow=7.0.0=py37hb39fc2d_0 +pip=20.0.2=py37_1 +pixman=0.34.0=h14c3975_1003 +protobuf=3.11.3=pypi_0 +pthread-stubs=0.4=h14c3975_1001 +py-opencv=3.4.2=py37hb342d67_1 +pyasn1=0.4.8=pypi_0 +pyasn1-modules=0.2.8=pypi_0 +python=3.7.6=h0371630_2 +python_abi=3.7=1_cp37m +pytorch=1.4.0=py3.7_cuda10.1.243_cudnn7.6.3_0 +pyyaml=5.3=pypi_0 +readline=7.0=h7b6447c_5 +requests=2.23.0=pypi_0 +requests-oauthlib=1.3.0=pypi_0 +rsa=4.0=pypi_0 +setuptools=46.0.0=py37_0 +six=1.14.0=py37_0 +sqlite=3.31.1=h7b6447c_0 +tensorboard=2.1.1=pypi_0 +tk=8.6.8=hbc83047_0 +torchvision=0.5.0=py37_cu101 +urllib3=1.25.8=pypi_0 +werkzeug=1.0.0=pypi_0 +wheel=0.34.2=py37_0 +x264=1!152.20180806=h14c3975_0 +xorg-fixesproto=5.0=h14c3975_1002 +xorg-inputproto=2.3.2=h14c3975_1002 +xorg-kbproto=1.0.7=h14c3975_1002 +xorg-libice=1.0.10=h516909a_0 +xorg-libsm=1.2.3=h84519dc_1000 +xorg-libx11=1.6.9=h516909a_0 +xorg-libxau=1.0.9=h14c3975_0 +xorg-libxdmcp=1.1.3=h516909a_0 +xorg-libxext=1.3.4=h516909a_0 
+xorg-libxfixes=5.0.3=h516909a_1004 +xorg-libxi=1.7.10=h516909a_0 +xorg-libxrender=0.9.10=h516909a_1002 +xorg-renderproto=0.11.1=h14c3975_1002 +xorg-xextproto=7.3.0=h14c3975_1002 +xorg-xproto=7.0.31=h14c3975_1007 +xz=5.2.4=h14c3975_4 +zlib=1.2.11=h7b6447c_3 +zstd=1.3.7=h0b5b093_0 diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simclr/run.py b/PuzzleTuning/Counterpart PreTrain Methods/simclr/run.py new file mode 100644 index 0000000000000000000000000000000000000000..1b4e09e0dde21f4e72c9532ca478d57f9d186d67 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simclr/run.py @@ -0,0 +1,92 @@ +import argparse +import torch +import torch.backends.cudnn as cudnn +from torchvision import models +from data_aug.contrastive_learning_dataset import ContrastiveLearningDataset +from models.resnet_simclr import ResNetSimCLR +from simclr import SimCLR + +model_names = sorted(name for name in models.__dict__ + if name.islower() and not name.startswith("__") + and callable(models.__dict__[name])) + +parser = argparse.ArgumentParser(description='PyTorch SimCLR') +parser.add_argument('--data', metavar='DIR', default='./datasets', + help='path to dataset') +parser.add_argument('--dataset-name', default='stl10', + help='dataset name', choices=['stl10', 'cifar10', 'local']) +parser.add_argument('--mode', default='train', + help='train val test stage', choices=['train', 'val', 'test']) +parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18', + choices=model_names, + help='model architecture: ' + + ' | '.join(model_names) + + ' (default: resnet50)') +parser.add_argument('-j', '--workers', default=12, type=int, metavar='N', + help='number of data loading workers (default: 32)') +parser.add_argument('--epochs', default=200, type=int, metavar='N', + help='number of total epochs to run') +parser.add_argument('-b', '--batch-size', default=256, type=int, + metavar='N', + help='mini-batch size (default: 256), this is the total ' + 'batch size of all GPUs on the current node when ' + 'using Data Parallel or Distributed Data Parallel') +parser.add_argument('-lr', '--learning-rate', default=0.0003, type=float, + metavar='LR', help='initial learning rate', dest='lr') +parser.add_argument('-wd', '--weight-decay', default=1e-4, type=float, + metavar='W', help='weight decay (default: 1e-4)', + dest='weight_decay') +parser.add_argument('--seed', default=None, type=int, + help='seed for initializing training. ') +parser.add_argument('--disable-cuda', action='store_true', + help='Disable CUDA') +parser.add_argument('--fp16-precision', action='store_true', + help='Whether or not to use 16-bit precision GPU training.') + +parser.add_argument('--out_dim', default=128, type=int, + help='feature dimension (default: 128)') +parser.add_argument('--log-every-n-steps', default=100, type=int, + help='Log every n steps') +parser.add_argument('--temperature', default=0.07, type=float, + help='softmax temperature (default: 0.07)') +parser.add_argument('--n-views', default=2, type=int, metavar='N', + help='Number of views for contrastive learning training.') +parser.add_argument('--gpu-index', default=0, type=int, help='Gpu index.') + + +def main(): + args = parser.parse_args() + assert args.n_views == 2, "Only two view training is supported. Please use --n-views 2." 
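+    # SimCLR forms its positive pairs from exactly two augmented views of each
+    # image, so the contrastive objective downstream assumes n_views == 2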
+    # check if gpu training is available
+    if not args.disable_cuda and torch.cuda.is_available():
+        args.device = torch.device('cuda')
+        cudnn.deterministic = True
+        cudnn.benchmark = True
+    else:
+        args.device = torch.device('cpu')
+        args.gpu_index = -1
+
+    dataset = ContrastiveLearningDataset(args.data)
+
+    train_dataset = dataset.get_dataset(args.dataset_name, args.n_views, mode=args.mode)
+
+    train_loader = torch.utils.data.DataLoader(
+        train_dataset, batch_size=args.batch_size, shuffle=True,
+        num_workers=args.workers, pin_memory=True, drop_last=True)
+
+    model = ResNetSimCLR(base_model=args.arch, out_dim=args.out_dim)
+
+    optimizer = torch.optim.Adam(model.parameters(), args.lr, weight_decay=args.weight_decay)
+
+    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=len(train_loader), eta_min=0,
+                                                           last_epoch=-1)
+
+    # It’s a no-op if the 'gpu_index' argument is a negative integer or None.
+    with torch.cuda.device(args.gpu_index):
+        simclr = SimCLR(model=model, optimizer=optimizer, scheduler=scheduler, args=args)
+        # SimCLR.train() takes start_epoch first (see simclr.py); fresh runs start at 0
+        simclr.train(0, train_loader)
+        # simclr.train_pretend(train_loader)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simclr/run_vit.py b/PuzzleTuning/Counterpart PreTrain Methods/simclr/run_vit.py
new file mode 100644
index 0000000000000000000000000000000000000000..e45629279d13440d4b3346969ed25f15f5db1d61
--- /dev/null
+++ b/PuzzleTuning/Counterpart PreTrain Methods/simclr/run_vit.py
@@ -0,0 +1,152 @@
+import argparse
+import torch
+import torch.backends.cudnn as cudnn
+from torchvision import models
+from data_aug.contrastive_learning_dataset import ContrastiveLearningDataset
+from models.resnet_simclr import ViTSimCLR
+from simclr import SimCLR
+import os
+
+model_names = sorted(name for name in models.__dict__
+                     if name.islower() and not name.startswith("__")
+                     and callable(models.__dict__[name]))
+
+
+def get_args_parser():
+    parser = argparse.ArgumentParser(description='PyTorch SimCLR')
+
+    # Dataset related
+    parser.add_argument('--data', metavar='DIR', default='./datasets',
+                        help='path to dataset')
+    parser.add_argument('--dataset-name', default='stl10',
+                        help='dataset name', choices=['stl10', 'cifar10', 'imagefolder', 'cpia-mini'])
+    parser.add_argument('--mode', default='train',
+                        help='train val test stage', choices=['train', 'val', 'test'])
+
+    # Training related
+    parser.add_argument('-j', '--workers', default=40, type=int, metavar='N',
+                        help='number of data loading workers (default: 40)')
+    parser.add_argument('--epochs', default=200, type=int, metavar='N',
+                        help='number of total epochs to run')
+    parser.add_argument('--batch_size', default=256, type=int,
+                        metavar='N',
+                        help='mini-batch size (default: 256), this is the total '
+                             'batch size of all GPUs on the current node when '
+                             'using Data Parallel or Distributed Data Parallel')
+    parser.add_argument('-lr', '--learning-rate', default=0.0003, type=float,
+                        metavar='LR', help='initial learning rate', dest='lr')
+    parser.add_argument('-wd', '--weight-decay', default=1e-4, type=float,
+                        metavar='W', help='weight decay (default: 1e-4)',
+                        dest='weight_decay')
+    parser.add_argument('--seed', default=42, type=int,
+                        help='seed for initializing training.')
+    parser.add_argument('--disable-cuda', action='store_true',
+                        help='Disable CUDA')
+    parser.add_argument('--fp16-precision', action='store_true',
+                        help='Whether or not to use 16-bit precision GPU training.')
+    parser.add_argument('--out_dim', default=128, type=int,
+                        help='feature dimension (default: 128)')
+    parser.add_argument('--log-every-n-steps', default=100, type=int,
+                        help='Log every n steps')
+    parser.add_argument('--temperature', default=0.07, type=float,
+                        help='softmax temperature (default: 0.07)')
+    parser.add_argument('--n-views', default=2, type=int, metavar='N',
+                        help='Number of views for contrastive learning training.')
+    parser.add_argument('--gpu-index', default=0, type=int, help='Gpu index.')
+
+    # Model related
+    parser.add_argument('-a', '--arch', type=str, default='vit_base_patch16_224',
+                        help='model architecture.')
+    parser.add_argument('--load_weight', type=str, help='model weight directory.')
+    parser.add_argument('--img_size', type=int, default=224, help='image size. For vit: 224, for resnet: 96.')
+
+    # added
+    parser.add_argument('--log_dir', default=' ',
+                        help='path where to tensorboard log')
+    parser.add_argument('--output_dir', default=' ',
+                        help='path where to store checkpoints')
+    parser.add_argument('--init_weight_pth', default='', type=str,
+                        help="init weight path")
+    parser.add_argument('--enable_notify', action='store_true', help='enable notify to send email')
+    return parser
+
+
+def main(args):
+
+    if args.enable_notify:
+        import notifyemail as notify
+
+        notify.Reboost(mail_host='smtp.163.com', mail_user='tum9598@163.com', mail_pass='EXVGQACCXPUIUQAE',
+                       default_reciving_list=['foe3305@163.com'],  # change here if you want to use notify
+                       log_root_path='log', max_log_cnt=5)
+        notify.add_text('SimCLR Training')
+        notify.add_text('------')
+        for a in str(args).split(','):
+            notify.add_text(a)
+        notify.add_text('------')
+        notify.send_log()
+
+    assert args.n_views == 2, "Only two view training is supported. Please use --n-views 2."
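Further down in `main`, resuming from a checkpoint assigns `scheduler.last_epoch` directly, which does not recompute the learning rate (the TODO in the resume block records the LR appearing constant after a resume). Restoring `scheduler.load_state_dict(checkpoint['scheduler'])`, which the code keeps commented out, is the canonical fix; a hedged alternative, assuming a plain `CosineAnnealingLR` as used here, is to replay the scheduler up to the resume epoch:

```python
import torch

# One dummy parameter so an optimizer/scheduler pair can be built.
params = [torch.nn.Parameter(torch.zeros(1))]
optimizer = torch.optim.Adam(params, lr=3e-4)
# ...optimizer.load_state_dict(checkpoint['optimizer']) would go here...

start_epoch = 50
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
    optimizer, T_max=200, eta_min=0, last_epoch=-1)
for _ in range(start_epoch):
    scheduler.step()  # each step recomputes the LR, unlike setting last_epoch

print(optimizer.param_groups[0]['lr'])  # a mid-cosine value, not the base LR
```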
+    # check if gpu training is available
+    if not args.disable_cuda and torch.cuda.is_available():
+        args.device = torch.device('cuda')
+        cudnn.deterministic = True
+        cudnn.benchmark = True
+    else:
+        args.device = torch.device('cpu')
+        args.gpu_index = -1
+
+    if not os.path.exists(args.output_dir):
+        os.makedirs(args.output_dir)
+
+    print("{}".format(args).replace(', ', ',\n'))
+
+    dataset = ContrastiveLearningDataset(args.data)
+
+    train_dataset = dataset.get_dataset(args.dataset_name, args.n_views, mode=args.mode, img_size=args.img_size)
+
+    train_loader = torch.utils.data.DataLoader(
+        train_dataset, batch_size=args.batch_size, shuffle=True,
+        num_workers=args.workers, pin_memory=True, drop_last=True)
+
+    model = ViTSimCLR(base_model=args.arch, out_dim=args.out_dim, load_weight=args.load_weight)
+
+    # load weight from file
+    if args.init_weight_pth:
+        print(f'Loading weight from {args.init_weight_pth}...')
+        init_weight = torch.load(args.init_weight_pth)
+        model.load_state_dict(init_weight, strict=False)
+        print('Weight loaded.')
+
+    model = model.to(args.device)
+
+    model = torch.nn.DataParallel(model)
+
+    optimizer = torch.optim.Adam(model.parameters(), args.lr, weight_decay=args.weight_decay)
+
+    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs, eta_min=0,
+                                                           last_epoch=-1)
+
+    start_epoch = 0
+    if args.load_weight:  # load from checkpoint
+        checkpoint = torch.load(args.load_weight)
+        model.load_state_dict(checkpoint['state_dict'], strict=True)
+        optimizer.load_state_dict(checkpoint['optimizer'])
+        # scheduler.load_state_dict(checkpoint['scheduler'])
+        start_epoch = int(checkpoint['epoch'])
+        # TODO: assigning last_epoch by hand does not recompute the LR, and the
+        # schedule was observed to stay constant after resuming; restoring
+        # scheduler.state_dict() (commented above) would be the cleaner fix
+        scheduler.last_epoch = start_epoch
+        print(f"Loaded weights from: {args.load_weight}, starting epoch: {start_epoch}")
+
+    # It’s a no-op if the 'gpu_index' argument is a negative integer or None.
+    with torch.cuda.device(args.gpu_index):
+        simclr = SimCLR(model=model, optimizer=optimizer, scheduler=scheduler, args=args)
+        simclr.train(start_epoch, train_loader)
+        # simclr.train_pretend(train_loader)
+
+
+if __name__ == "__main__":
+    args = get_args_parser()
+    args = args.parse_args()
+
+    main(args)
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simclr/simclr.py b/PuzzleTuning/Counterpart PreTrain Methods/simclr/simclr.py
new file mode 100644
index 0000000000000000000000000000000000000000..b9d3fa92437590f5aaba607593430c20c6f4cfa9
--- /dev/null
+++ b/PuzzleTuning/Counterpart PreTrain Methods/simclr/simclr.py
@@ -0,0 +1,164 @@
+import logging
+import os
+import sys
+
+import torch
+import torch.nn.functional as F
+from torch.cuda.amp import GradScaler, autocast
+from torch.utils.tensorboard import SummaryWriter
+from tqdm import tqdm
+from utils import save_config_file, accuracy, save_checkpoint
+import time
+
+torch.manual_seed(0)
+
+
+def time_to_str(t, mode='sec'):
+    """Format a duration in seconds as a readable string."""
+    if mode == 'min':
+        t = int(t) / 60
+        hr = t // 60
+        min = t % 60
+        return '%2d hr %02d min' % (hr, min)
+    elif mode == 'sec':
+        t = int(t)
+        min = t // 60
+        sec = t % 60
+        return '%2d min %02d sec' % (min, sec)
+    else:
+        raise NotImplementedError
+
+
+class SimCLR(object):
+
+    def __init__(self, *args, **kwargs):
+        self.args = kwargs['args']
+        self.model = kwargs['model'].to(self.args.device)
+        self.optimizer = kwargs['optimizer']
+        self.scheduler = kwargs['scheduler']
+        self.writer = SummaryWriter(log_dir=self.args.log_dir)
+        self.output_dir = self.args.output_dir
+        logging.basicConfig(filename=os.path.join(self.writer.log_dir, 'training.log'), level=logging.DEBUG)
+        self.criterion = torch.nn.CrossEntropyLoss().to(self.args.device)
+
+    def info_nce_loss(self, features):
+
+        # labels: [B] -> [2B], [512]
+        labels = torch.cat([torch.arange(self.args.batch_size) for i in range(self.args.n_views)], dim=0)
+        # labels: [2B] -> [2B, 2B], [512, 512]
+        labels = (labels.unsqueeze(0) == labels.unsqueeze(1)).float()
+        labels = labels.to(self.args.device)
+
+        # features: [2B, CLS], [512, 128]
+        features = F.normalize(features, dim=1)
+
+        # similarity_matrix: [2B, 2B], [512, 512]
+        similarity_matrix = torch.matmul(features, features.T)
+        # assert similarity_matrix.shape == (
+        #     self.args.n_views * self.args.batch_size, self.args.n_views * self.args.batch_size)
+        # assert similarity_matrix.shape == labels.shape
+
+        # discard the main diagonal from both: labels and similarities matrix
+        mask = torch.eye(labels.shape[0], dtype=torch.bool).to(self.args.device)  # [512, 512]
+        labels = labels[~mask].view(labels.shape[0], -1)  # [512, 512] -> [512, 511]
+        similarity_matrix = similarity_matrix[~mask].view(similarity_matrix.shape[0], -1)  # [512, 512] -> [512, 511]
+        # assert similarity_matrix.shape == labels.shape
+
+        # select and combine multiple positives [512, 1]
+        positives = similarity_matrix[labels.bool()].view(labels.shape[0], -1)
+
+        # select only the negatives [512, 510]
+        negatives = similarity_matrix[~labels.bool()].view(similarity_matrix.shape[0], -1)
+
+        # [512, 1+510] -> [512, 511]
+        logits = torch.cat([positives, negatives], dim=1)
+
+        # [512]
+        labels = torch.zeros(logits.shape[0], dtype=torch.long).to(self.args.device)
+
+        logits = logits / self.args.temperature
+        return logits, labels
+
+    def train(self, start_epoch, train_loader):
+
+        scaler = GradScaler(enabled=self.args.fp16_precision)
+
+        # save config file
+        save_config_file(self.writer.log_dir, self.args)
+
+        n_iter = 0
+        logging.info(f"Start SimCLR training for {self.args.epochs} epochs.")
+        logging.info(f"Training with: {self.args.device}.")
+
+        for epoch_counter in range(start_epoch, self.args.epochs):
+
+            time_start = time.time()
+            n_batch = 0
+            for images, _ in tqdm(train_loader, desc=f'Epoch {epoch_counter}'):
+                images = torch.cat(images, dim=0)
+
+                images = images.to(self.args.device)
+
+                with autocast(enabled=self.args.fp16_precision):
+                    features = self.model(images)
+                    logits, labels = self.info_nce_loss(features)
+                    loss = self.criterion(logits, labels)
+
+                self.optimizer.zero_grad()
+
+                scaler.scale(loss).backward()
+
+                scaler.step(self.optimizer)
+                scaler.update()
+
+                top1, top5 = accuracy(logits, labels, topk=(1, 5))
+                if n_iter % self.args.log_every_n_steps == 0 and n_iter != 0:
+                    # top1, top5 = accuracy(logits, labels, topk=(1, 5))
+                    self.writer.add_scalar('loss', loss, global_step=n_iter)
+                    self.writer.add_scalar('acc/top1', top1[0], global_step=n_iter)
+                    self.writer.add_scalar('acc/top5', top5[0], global_step=n_iter)
+                    self.writer.add_scalar('learning_rate', self.scheduler.get_lr()[0], global_step=n_iter)
+
+                if n_batch % self.args.log_every_n_steps == 0:
+                    # Show training status
+                    current_stat = 'lr: {:.7f}\t| epoch: {}\t| batch: {:.0f}/{}\t| loss: {:.3f}\t| time: {}'.format(
+                        self.optimizer.state_dict()['param_groups'][0]['lr'],
+                        epoch_counter,
+                        n_batch,
+                        len(train_loader) - 1,
+                        loss.item(),
+                        time_to_str((time.time() - time_start), 'sec')
+                    )
+                    logging.info(current_stat)
+                    # logging.debug(f"Batch: {n_batch}\{len(train_loader)}\tLoss: {loss}\tTop1 accuracy: {top1[0]}")
+                n_iter += 1
+                n_batch += 1
+
+            # warmup for the first 10 epochs
+            if epoch_counter >= 10:
+                self.scheduler.step()
+            logging.debug(f"Epoch: {epoch_counter}\tLoss: {loss}\tTop1 accuracy: {top1[0]}")
+
+            # Save a checkpoint every 20 epochs
+            if epoch_counter % 20 == 0 and epoch_counter != 0:
+                checkpoint_name = 'checkpoint_{:04d}.pth.tar'.format(epoch_counter)
+                save_checkpoint({
+                    'epoch': epoch_counter,
+                    'arch': self.args.arch,
+                    'scheduler': self.scheduler.state_dict(),
+                    'state_dict': self.model.state_dict(),
+                    'optimizer': self.optimizer.state_dict(),
+                }, is_best=False, filename=os.path.join(self.output_dir, checkpoint_name))
+                logging.info(f"Model checkpoint and metadata have been saved at {self.writer.log_dir}.")
+
+        logging.info("Training has finished.")
+        # save model checkpoints
+        checkpoint_name = 'checkpoint_{:04d}.pth.tar'.format(self.args.epochs)
+        save_checkpoint({
+            'epoch': self.args.epochs,
+            'arch': self.args.arch,
+            'scheduler': self.scheduler.state_dict(),
+            'state_dict': self.model.state_dict(),
+            'optimizer': self.optimizer.state_dict(),
+        }, is_best=False, filename=os.path.join(self.output_dir, checkpoint_name))
+        logging.info(f"Model checkpoint and metadata have been saved at {self.writer.log_dir}.")
\ No newline at end of file
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simclr/utils.py b/PuzzleTuning/Counterpart PreTrain Methods/simclr/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf92cbd8ee0aafd0316028bd8b2a63e0ffbbd805
--- /dev/null
+++ b/PuzzleTuning/Counterpart PreTrain Methods/simclr/utils.py
@@ -0,0 +1,35 @@
+import os
+import shutil
+
+import torch
+import yaml
+
+
+def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
+    torch.save(state, filename)
+    if is_best:
+        shutil.copyfile(filename, 'model_best.pth.tar')
+
+
+def save_config_file(model_checkpoints_folder, args):
+    if not os.path.exists(model_checkpoints_folder):
+        os.makedirs(model_checkpoints_folder)
+
with open(os.path.join(model_checkpoints_folder, 'config.yml'), 'w') as outfile: + yaml.dump(args, outfile, default_flow_style=False) + + +def accuracy(output, target, topk=(1,)): + """Computes the accuracy over the k top predictions for the specified values of k""" + with torch.no_grad(): + maxk = max(topk) + batch_size = target.size(0) + + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + res = [] + for k in topk: + correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True) + res.append(correct_k.mul_(100.0 / batch_size)) + return res diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/CODE_OF_CONDUCT.md b/PuzzleTuning/Counterpart PreTrain Methods/simmim/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000000000000000000000000000000..f9ba8cf65f3e3104dd061c178066ec8247811f33 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/CODE_OF_CONDUCT.md @@ -0,0 +1,9 @@ +# Microsoft Open Source Code of Conduct + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). + +Resources: + +- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) +- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +- Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/LICENSE b/PuzzleTuning/Counterpart PreTrain Methods/simmim/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..9e841e7a26e4eb057b24511e7b92d42b257a80e5 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+    SOFTWARE
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/README.md b/PuzzleTuning/Counterpart PreTrain Methods/simmim/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9fdf785b92d5e8b43f967912a3a5e0a69ef69a73
--- /dev/null
+++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/README.md
@@ -0,0 +1,14 @@
+# SimMIM
+
+The original repo of SimMIM can be found [here](https://github.com/microsoft/SimMIM).
+
+To install environments:
+```bash
+pip install -r requirements.txt
+```
+
+To start pretraining:
+```bash
+# You need to alter the script according to your directories
+bash pretrain.sh
+```
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/README_origin.md b/PuzzleTuning/Counterpart PreTrain Methods/simmim/README_origin.md
new file mode 100644
index 0000000000000000000000000000000000000000..ea9a2c111e621df80505c920b945dd1fc5137c4f
--- /dev/null
+++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/README_origin.md
@@ -0,0 +1,156 @@
+# SimMIM
+
+By [Zhenda Xie](https://zdaxie.github.io)\*, [Zheng Zhang](https://stupidzz.github.io/)\*, [Yue Cao](http://yue-cao.me)\*, [Yutong Lin](https://github.com/impiga), [Jianmin Bao](https://jianminbao.github.io/), [Zhuliang Yao](https://github.com/Howal), [Qi Dai](https://www.microsoft.com/en-us/research/people/qid/) and [Han Hu](https://ancientmooner.github.io/)\*.
+
+This repo is the official implementation of ["SimMIM: A Simple Framework for Masked Image Modeling"](https://arxiv.org/abs/2111.09886).
+
+## Updates
+
+***09/29/2022***
+
+SimMIM was merged into the [Swin Transformer repo on GitHub](https://github.com/microsoft/Swin-Transformer).
+
+***03/02/2022***
+
+SimMIM was accepted to CVPR 2022. SimMIM was used in ["Swin Transformer V2"](https://github.com/microsoft/Swin-Transformer) to alleviate the data-hungry problem for large-scale vision model training.
+
+***12/09/2021***
+
+Initial commits:
+
+1. Pre-trained and fine-tuned models on ImageNet-1K (`Swin Base`, `Swin Large`, and `ViT Base`) are provided.
+2. The supported code for ImageNet-1K pre-training and fine-tuning is provided.
+
+## Introduction
+
+**SimMIM** is initially described in [arxiv](https://arxiv.org/abs/2111.09886), and serves as a
+simple framework for masked image modeling. Through systematic study, we find that simple designs of each component reveal very strong representation learning performance: 1) random masking of the input image with a moderately large masked patch size (e.g., 32) makes a strong pre-text task; 2) predicting raw pixels of RGB values by direct regression performs no worse than the patch classification approaches with complex designs; 3) the prediction head can be as light as a linear layer, with no worse performance than heavier ones.
+
+ +
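As a concrete reading of the three findings above, here is a hedged sketch of the objective in isolation: mask large patches at roughly a 0.6 ratio, regress raw pixel values, and score only the masked positions with an l1 loss. Tensor names and shapes are illustrative, not the exact API of this repo:

```python
import torch
import torch.nn.functional as F

B, C, H, W = 2, 3, 192, 192
patch = 32                                   # moderately large mask patch
x = torch.randn(B, C, H, W)                  # input image batch
x_rec = torch.randn(B, C, H, W)              # stand-in for the decoder output

# Random patch-level mask (~60% of the 6x6 patch grid), upsampled to pixels.
mask = (torch.rand(B, H // patch, W // patch) < 0.6).float()
mask = mask.repeat_interleave(patch, dim=1).repeat_interleave(patch, dim=2)

# l1 regression on masked pixels only, averaged over channels.
loss = (F.l1_loss(x_rec, x, reduction='none') * mask.unsqueeze(1)).sum() \
       / (mask.sum() * C + 1e-5)
print(loss.item())
```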
+ +## Main Results on ImageNet + +### Swin Transformer + +**ImageNet-1K Pre-trained and Fine-tuned Models** + +| name | pre-train epochs | pre-train resolution | fine-tune resolution | acc@1 | pre-trained model | fine-tuned model | +| :---: | :---: | :---: | :---: | :---: | :---: | :---: | +| Swin-Base | 100 | 192x192 | 192x192 | 82.8 | [google](https://drive.google.com/file/d/1Wcbr66JL26FF30Kip9fZa_0lXrDAKP-d/view?usp=sharing)/[config](configs/swin_base__100ep/simmim_pretrain__swin_base__img192_window6__100ep.yaml) | [google](https://drive.google.com/file/d/1RsgHfjB4B1ZYblXEQVT-FPX3WSvBrxcs/view?usp=sharing)/[config](configs/swin_base__100ep/simmim_finetune__swin_base__img192_window6__100ep.yaml) | +| Swin-Base | 100 | 192x192 | 224x224 | 83.5 | [google](https://drive.google.com/file/d/1Wcbr66JL26FF30Kip9fZa_0lXrDAKP-d/view?usp=sharing)/[config](configs/swin_base__100ep/simmim_pretrain__swin_base__img192_window6__100ep.yaml) | [google](https://drive.google.com/file/d/1mb43BkW56F5smwiX-g7QUUD7f1Rftq8u/view?usp=sharing)/[config](configs/swin_base__100ep/simmim_finetune__swin_base__img224_window7__100ep.yaml) | +| Swin-Base | 800 | 192x192 | 224x224 | 84.0 | [google](https://drive.google.com/file/d/15zENvGjHlM71uKQ3d2FbljWPubtrPtjl/view?usp=sharing)/[config](configs/swin_base__800ep/simmim_pretrain__swin_base__img192_window6__800ep.yaml) | [google](https://drive.google.com/file/d/1xEKyfMTsdh6TfnYhk5vbw0Yz7a-viZ0w/view?usp=sharing)/[config](configs/swin_base__800ep/simmim_finetune__swin_base__img224_window7__800ep.yaml) | +| Swin-Large | 800 | 192x192 | 224x224 | 85.4 | [google](https://drive.google.com/file/d/1qDxrTl2YUDB0505_4QrU5LU2R1kKmcBP/view?usp=sharing)/[config](configs/swin_large__800ep/simmim_pretrain__swin_large__img192_window12__800ep.yaml) | [google](https://drive.google.com/file/d/1mf0ZpXttEvFsH87Www4oQ-t8Kwr0x485/view?usp=sharing)/[config](configs/swin_large__800ep/simmim_finetune__swin_large__img224_window14__800ep.yaml) | +| SwinV2-Huge | 800 | 192x192 | 224x224 | 85.7 | / | / | +| SwinV2-Huge | 800 | 192x192 | 512x512 | 87.1 | / | / | + +### Vision Transformer + +**ImageNet-1K Pre-trained and Fine-tuned Models** + +| name | pre-train epochs | pre-train resolution | fine-tune resolution | acc@1 | pre-trained model | fine-tuned model | +| :---: | :---: | :---: | :---: | :---: | :---: | :---: | +| ViT-Base | 800 | 224x224 | 224x224 | 83.8 | [google](https://drive.google.com/file/d/1dJn6GYkwMIcoP3zqOEyW1_iQfpBi8UOw/view?usp=sharing)/[config](configs/vit_base__800ep/simmim_pretrain__vit_base__img224__800ep.yaml) | [google](https://drive.google.com/file/d/1fKgDYd0tRgyHyTnyB1CleYxjo0Gn5tEB/view?usp=sharing)/[config](configs/vit_base__800ep/simmim_finetune__vit_base__img224__800ep.yaml) | + +## Citing SimMIM + +``` +@inproceedings{xie2021simmim, + title={SimMIM: A Simple Framework for Masked Image Modeling}, + author={Xie, Zhenda and Zhang, Zheng and Cao, Yue and Lin, Yutong and Bao, Jianmin and Yao, Zhuliang and Dai, Qi and Hu, Han}, + booktitle={International Conference on Computer Vision and Pattern Recognition (CVPR)}, + year={2022} +} +``` + +## Getting Started + +### Installation + +- Install `CUDA 11.3` with `cuDNN 8` following the official installation guide of [CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html) and [cuDNN](https://developer.nvidia.com/rdp/cudnn-archive). 
+
+- Setup conda environment:
+```bash
+# Create environment
+conda create -n SimMIM python=3.8 -y
+conda activate SimMIM
+
+# Install requirements
+conda install pytorch torchvision torchaudio cudatoolkit=11.3 -c pytorch -y
+
+# Install apex
+git clone https://github.com/NVIDIA/apex
+cd apex
+pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./
+cd ..
+
+# Clone SimMIM
+git clone https://github.com/microsoft/SimMIM
+cd SimMIM
+
+# Install other requirements
+pip install -r requirements.txt
+```
+
+### Evaluating provided models
+
+To evaluate a provided model on the ImageNet validation set, run:
+```bash
+python -m torch.distributed.launch --nproc_per_node <num-of-gpus-to-use> main_finetune.py \
+--eval --cfg <config-file> --resume <checkpoint> --data-path <imagenet-path>
+```
+
+For example, to evaluate the `Swin Base` model on a single GPU, run:
+```bash
+python -m torch.distributed.launch --nproc_per_node 1 main_finetune.py \
+--eval --cfg configs/swin_base__800ep/simmim_finetune__swin_base__img224_window7__800ep.yaml --resume simmim_finetune__swin_base__img224_window7__800ep.pth --data-path <imagenet-path>
+```
+
+### Pre-training with SimMIM
+To pre-train models with `SimMIM`, run:
+```bash
+python -m torch.distributed.launch --nproc_per_node <num-of-gpus-to-use> main_simmim.py \
+--cfg <config-file> --data-path <imagenet-path>/train [--batch-size <batch-size-per-gpu> --output <output-directory> --tag <job-tag>]
+```
+
+For example, to pre-train `Swin Base` for 800 epochs on one DGX-2 server, run:
+```bash
+python -m torch.distributed.launch --nproc_per_node 16 main_simmim.py \
+--cfg configs/swin_base__800ep/simmim_pretrain__swin_base__img192_window6__800ep.yaml --batch-size 128 --data-path <imagenet-path>/train [--output <output-directory> --tag <job-tag>]
+```
+
+### Fine-tuning pre-trained models
+To fine-tune models pre-trained by `SimMIM`, run:
+```bash
+python -m torch.distributed.launch --nproc_per_node <num-of-gpus-to-use> main_finetune.py \
+--cfg <config-file> --data-path <imagenet-path> --pretrained <pretrained-ckpt> [--batch-size <batch-size-per-gpu> --output <output-directory> --tag <job-tag>]
+```
+
+For example, to fine-tune `Swin Base` pre-trained by `SimMIM` on one DGX-2 server, run:
+```bash
+python -m torch.distributed.launch --nproc_per_node 16 main_finetune.py \
+--cfg configs/swin_base__800ep/simmim_finetune__swin_base__img224_window7__800ep.yaml --batch-size 128 --data-path <imagenet-path> --pretrained <pretrained-ckpt> [--output <output-directory> --tag <job-tag>]
+```
+
+## Contributing
+
+This project welcomes contributions and suggestions. Most contributions require you to agree to a
+Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
+the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.
+
+When you submit a pull request, a CLA bot will automatically determine whether you need to provide
+a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions
+provided by the bot. You will only need to do this once across all repos using our CLA.
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
+contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
+
+## Trademarks
+
+This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft
+trademarks or logos is subject to and must follow
+[Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general).
+Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship.
+Any use of third-party trademarks or logos is subject to those third parties' policies.
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/SECURITY.md b/PuzzleTuning/Counterpart PreTrain Methods/simmim/SECURITY.md
new file mode 100644
index 0000000000000000000000000000000000000000..f7b89984f0fb5dd204028bc525e19eefc0859f4f
--- /dev/null
+++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/SECURITY.md
@@ -0,0 +1,41 @@
+
+
+## Security
+
+Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
+
+If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://docs.microsoft.com/en-us/previous-versions/tn-archive/cc751383(v=technet.10)), please report it to us as described below.
+
+## Reporting Security Issues
+
+**Please do not report security vulnerabilities through public GitHub issues.**
+
+Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://msrc.microsoft.com/create-report).
+
+If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://www.microsoft.com/en-us/msrc/pgp-key-msrc).
+
+You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc).
+
+Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
+
+  * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
+  * Full paths of source file(s) related to the manifestation of the issue
+  * The location of the affected source code (tag/branch/commit or direct URL)
+  * Any special configuration required to reproduce the issue
+  * Step-by-step instructions to reproduce the issue
+  * Proof-of-concept or exploit code (if possible)
+  * Impact of the issue, including how an attacker might exploit the issue
+
+This information will help us triage your report more quickly.
+
+If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://microsoft.com/msrc/bounty) page for more details about our active programs.
+
+## Preferred Languages
+
+We prefer all communications to be in English.
+
+## Policy
+
+Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://www.microsoft.com/en-us/msrc/cvd).
+ + \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/SUPPORT.md b/PuzzleTuning/Counterpart PreTrain Methods/simmim/SUPPORT.md new file mode 100644 index 0000000000000000000000000000000000000000..dc72f0e5a0bc2807bf3df31dbc7455e6991b127a --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/SUPPORT.md @@ -0,0 +1,25 @@ +# TODO: The maintainer of this repo has not yet edited this file + +**REPO OWNER**: Do you want Customer Service & Support (CSS) support for this product/project? + +- **No CSS support:** Fill out this template with information about how to file issues and get help. +- **Yes CSS support:** Fill out an intake form at [aka.ms/spot](https://aka.ms/spot). CSS will work with/help you to determine next steps. More details also available at [aka.ms/onboardsupport](https://aka.ms/onboardsupport). +- **Not sure?** Fill out a SPOT intake as though the answer were "Yes". CSS will help you decide. + +*Then remove this first heading from this SUPPORT.MD file before publishing your repo.* + +# Support + +## How to file issues and get help + +This project uses GitHub Issues to track bugs and feature requests. Please search the existing +issues before filing new issues to avoid duplicates. For new issues, file your bug or +feature request as a new Issue. + +For help and questions about using this project, please **REPO MAINTAINER: INSERT INSTRUCTIONS HERE +FOR HOW TO ENGAGE REPO OWNERS OR COMMUNITY FOR HELP. COULD BE A STACK OVERFLOW TAG OR OTHER +CHANNEL. WHERE WILL YOU HELP PEOPLE?**. + +## Microsoft Support Policy + +Support for this **PROJECT or PRODUCT** is limited to the resources listed above. diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/config.py b/PuzzleTuning/Counterpart PreTrain Methods/simmim/config.py new file mode 100644 index 0000000000000000000000000000000000000000..a8c7735b75b46c36db276e5addeccd875dedb610 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/config.py @@ -0,0 +1,264 @@ +# -------------------------------------------------------- +# SimMIM +# Copyright (c) 2021 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Ze Liu +# Modified by Zhenda Xie +# -------------------------------------------------------- + +import os +import yaml +from yacs.config import CfgNode as CN + +_C = CN() + +# Base config files +_C.BASE = [''] + +# ----------------------------------------------------------------------------- +# Data settings +# ----------------------------------------------------------------------------- +_C.DATA = CN() +# Batch size for a single GPU, could be overwritten by command line argument +_C.DATA.BATCH_SIZE = 128 +# Path to dataset, could be overwritten by command line argument +_C.DATA.DATA_PATH = '' +# Dataset name +_C.DATA.DATASET = 'imagenet' +# Input image size +_C.DATA.IMG_SIZE = 224 +# Interpolation to resize image (random, bilinear, bicubic) +_C.DATA.INTERPOLATION = 'bicubic' +# Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU. 
+_C.DATA.PIN_MEMORY = True +# Number of data loading threads +_C.DATA.NUM_WORKERS = 8 + +# [SimMIM] Mask patch size for MaskGenerator +_C.DATA.MASK_PATCH_SIZE = 32 +# [SimMIM] Mask ratio for MaskGenerator +_C.DATA.MASK_RATIO = 0.6 + +# ----------------------------------------------------------------------------- +# Model settings +# ----------------------------------------------------------------------------- +_C.MODEL = CN() +# Model type +_C.MODEL.TYPE = 'swin' +# Model name +_C.MODEL.NAME = 'swin_tiny_patch4_window7_224' +# Checkpoint to resume, could be overwritten by command line argument +_C.MODEL.RESUME = '' +# Number of classes, overwritten in data preparation +_C.MODEL.NUM_CLASSES = 1000 +# Dropout rate +_C.MODEL.DROP_RATE = 0.0 +# Drop path rate +_C.MODEL.DROP_PATH_RATE = 0.1 +# Label Smoothing +_C.MODEL.LABEL_SMOOTHING = 0.1 + +# Swin Transformer parameters +_C.MODEL.SWIN = CN() +_C.MODEL.SWIN.PATCH_SIZE = 4 +_C.MODEL.SWIN.IN_CHANS = 3 +_C.MODEL.SWIN.EMBED_DIM = 96 +_C.MODEL.SWIN.DEPTHS = [2, 2, 6, 2] +_C.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24] +_C.MODEL.SWIN.WINDOW_SIZE = 7 +_C.MODEL.SWIN.MLP_RATIO = 4. +_C.MODEL.SWIN.QKV_BIAS = True +_C.MODEL.SWIN.QK_SCALE = None +_C.MODEL.SWIN.APE = False +_C.MODEL.SWIN.PATCH_NORM = True + +# Vision Transformer parameters +_C.MODEL.VIT = CN() +_C.MODEL.VIT.PATCH_SIZE = 16 +_C.MODEL.VIT.IN_CHANS = 3 +_C.MODEL.VIT.EMBED_DIM = 768 +_C.MODEL.VIT.DEPTH = 12 +_C.MODEL.VIT.NUM_HEADS = 12 +_C.MODEL.VIT.MLP_RATIO = 4 +_C.MODEL.VIT.QKV_BIAS = True +_C.MODEL.VIT.INIT_VALUES = 0.1 +_C.MODEL.VIT.USE_APE = False +_C.MODEL.VIT.USE_RPB = False +_C.MODEL.VIT.USE_SHARED_RPB = True +_C.MODEL.VIT.USE_MEAN_POOLING = False + +# ----------------------------------------------------------------------------- +# Training settings +# ----------------------------------------------------------------------------- +_C.TRAIN = CN() +_C.TRAIN.START_EPOCH = 0 +_C.TRAIN.EPOCHS = 300 +_C.TRAIN.WARMUP_EPOCHS = 20 +_C.TRAIN.WEIGHT_DECAY = 0.05 +_C.TRAIN.BASE_LR = 5e-4 +_C.TRAIN.WARMUP_LR = 5e-7 +_C.TRAIN.MIN_LR = 5e-6 +# Clip gradient norm +_C.TRAIN.CLIP_GRAD = 5.0 +# Auto resume from latest checkpoint +_C.TRAIN.AUTO_RESUME = True +# Gradient accumulation steps +# could be overwritten by command line argument +_C.TRAIN.ACCUMULATION_STEPS = 0 +# Whether to use gradient checkpointing to save memory +# could be overwritten by command line argument +_C.TRAIN.USE_CHECKPOINT = False + +# LR scheduler +_C.TRAIN.LR_SCHEDULER = CN() +_C.TRAIN.LR_SCHEDULER.NAME = 'cosine' +# Epoch interval to decay LR, used in StepLRScheduler +_C.TRAIN.LR_SCHEDULER.DECAY_EPOCHS = 30 +# LR decay rate, used in StepLRScheduler +_C.TRAIN.LR_SCHEDULER.DECAY_RATE = 0.1 +# Gamma / Multi steps value, used in MultiStepLRScheduler +_C.TRAIN.LR_SCHEDULER.GAMMA = 0.1 +_C.TRAIN.LR_SCHEDULER.MULTISTEPS = [] + +# Optimizer +_C.TRAIN.OPTIMIZER = CN() +_C.TRAIN.OPTIMIZER.NAME = 'adamw' +# Optimizer Epsilon +_C.TRAIN.OPTIMIZER.EPS = 1e-8 +# Optimizer Betas +_C.TRAIN.OPTIMIZER.BETAS = (0.9, 0.999) +# SGD momentum +_C.TRAIN.OPTIMIZER.MOMENTUM = 0.9 + +# [SimMIM] Layer decay for fine-tuning +_C.TRAIN.LAYER_DECAY = 1.0 + +# ----------------------------------------------------------------------------- +# Augmentation settings +# ----------------------------------------------------------------------------- +_C.AUG = CN() +# Color jitter factor +_C.AUG.COLOR_JITTER = 0.4 +# Use AutoAugment policy. 
"v0" or "original" +_C.AUG.AUTO_AUGMENT = 'rand-m9-mstd0.5-inc1' +# Random erase prob +_C.AUG.REPROB = 0.25 +# Random erase mode +_C.AUG.REMODE = 'pixel' +# Random erase count +_C.AUG.RECOUNT = 1 +# Mixup alpha, mixup enabled if > 0 +_C.AUG.MIXUP = 0.8 +# Cutmix alpha, cutmix enabled if > 0 +_C.AUG.CUTMIX = 1.0 +# Cutmix min/max ratio, overrides alpha and enables cutmix if set +_C.AUG.CUTMIX_MINMAX = None +# Probability of performing mixup or cutmix when either/both is enabled +_C.AUG.MIXUP_PROB = 1.0 +# Probability of switching to cutmix when both mixup and cutmix enabled +_C.AUG.MIXUP_SWITCH_PROB = 0.5 +# How to apply mixup/cutmix params. Per "batch", "pair", or "elem" +_C.AUG.MIXUP_MODE = 'batch' + +# ----------------------------------------------------------------------------- +# Testing settings +# ----------------------------------------------------------------------------- +_C.TEST = CN() +# Whether to use center crop when testing +_C.TEST.CROP = True + +# ----------------------------------------------------------------------------- +# Misc +# ----------------------------------------------------------------------------- +# Mixed precision opt level, if O0, no amp is used ('O0', 'O1', 'O2') +# overwritten by command line argument +_C.AMP_OPT_LEVEL = '' +# Path to output folder, overwritten by command line argument +_C.OUTPUT = '' +# Tag of experiment, overwritten by command line argument +_C.TAG = 'default' +# Frequency to save checkpoint +_C.SAVE_FREQ = 1 +# Frequency to logging info +_C.PRINT_FREQ = 10 +# Fixed random seed +_C.SEED = 0 +# Perform evaluation only, overwritten by command line argument +_C.EVAL_MODE = False +# Test throughput only, overwritten by command line argument +_C.THROUGHPUT_MODE = False +# local rank for DistributedDataParallel, given by command line argument +_C.LOCAL_RANK = 0 + +# [SimMIM] path to pre-trained model +_C.PRETRAINED = '' + + +def _update_config_from_file(config, cfg_file): + config.defrost() + with open(cfg_file, 'r') as f: + yaml_cfg = yaml.load(f, Loader=yaml.FullLoader) + + for cfg in yaml_cfg.setdefault('BASE', ['']): + if cfg: + _update_config_from_file( + config, os.path.join(os.path.dirname(cfg_file), cfg) + ) + print('=> merge config from {}'.format(cfg_file)) + config.merge_from_file(cfg_file) + config.freeze() + + +def update_config(config, args): + _update_config_from_file(config, args.cfg) + + config.defrost() + if args.opts: + config.merge_from_list(args.opts) + + def _check_args(name): + if hasattr(args, name) and eval(f'args.{name}'): + return True + return False + + # merge from specific arguments + if _check_args('batch_size'): + config.DATA.BATCH_SIZE = args.batch_size + if _check_args('data_path'): + config.DATA.DATA_PATH = args.data_path + if _check_args('resume'): + config.MODEL.RESUME = args.resume + if _check_args('pretrained'): + config.PRETRAINED = args.pretrained + if _check_args('accumulation_steps'): + config.TRAIN.ACCUMULATION_STEPS = args.accumulation_steps + if _check_args('use_checkpoint'): + config.TRAIN.USE_CHECKPOINT = True + if _check_args('amp_opt_level'): + config.AMP_OPT_LEVEL = args.amp_opt_level + if _check_args('output'): + config.OUTPUT = args.output + if _check_args('tag'): + config.TAG = args.tag + if _check_args('eval'): + config.EVAL_MODE = True + if _check_args('throughput'): + config.THROUGHPUT_MODE = True + + # set local rank for distributed training + config.LOCAL_RANK = args.local_rank + + # output folder + config.OUTPUT = os.path.join(config.OUTPUT, config.MODEL.NAME, config.TAG) + + 
config.freeze() + + +def get_config(args): + """Get a yacs CfgNode object with default values.""" + # Return a clone so that the defaults will not be altered + # This is for the "local variable" use pattern + config = _C.clone() + update_config(config, args) + + return config diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/configs/swin_base__100ep/simmim_finetune__swin_base__img192_window6__100ep.yaml b/PuzzleTuning/Counterpart PreTrain Methods/simmim/configs/swin_base__100ep/simmim_finetune__swin_base__img192_window6__100ep.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fdb6877d28ec1c80b4e340b93114cef4cd2f6a08 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/configs/swin_base__100ep/simmim_finetune__swin_base__img192_window6__100ep.yaml @@ -0,0 +1,22 @@ +MODEL: + TYPE: swin + NAME: simmim_finetune + DROP_PATH_RATE: 0.1 + SWIN: + EMBED_DIM: 128 + DEPTHS: [ 2, 2, 18, 2 ] + NUM_HEADS: [ 4, 8, 16, 32 ] + WINDOW_SIZE: 6 +DATA: + IMG_SIZE: 192 +TRAIN: + EPOCHS: 100 + WARMUP_EPOCHS: 20 + BASE_LR: 1.25e-3 + WARMUP_LR: 2.5e-7 + MIN_LR: 2.5e-7 + WEIGHT_DECAY: 0.05 + LAYER_DECAY: 0.9 +PRINT_FREQ: 100 +SAVE_FREQ: 5 +TAG: simmim_finetune__swin_base__img192_window6__100ep \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/configs/swin_base__100ep/simmim_finetune__swin_base__img224_window7__100ep.yaml b/PuzzleTuning/Counterpart PreTrain Methods/simmim/configs/swin_base__100ep/simmim_finetune__swin_base__img224_window7__100ep.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fb79113b104ab1b3a634e6971093cd446a8c1dc0 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/configs/swin_base__100ep/simmim_finetune__swin_base__img224_window7__100ep.yaml @@ -0,0 +1,22 @@ +MODEL: + TYPE: swin + NAME: simmim_finetune + DROP_PATH_RATE: 0.1 + SWIN: + EMBED_DIM: 128 + DEPTHS: [ 2, 2, 18, 2 ] + NUM_HEADS: [ 4, 8, 16, 32 ] + WINDOW_SIZE: 7 +DATA: + IMG_SIZE: 224 +TRAIN: + EPOCHS: 100 + WARMUP_EPOCHS: 20 + BASE_LR: 1.25e-3 + WARMUP_LR: 2.5e-7 + MIN_LR: 2.5e-7 + WEIGHT_DECAY: 0.05 + LAYER_DECAY: 0.9 +PRINT_FREQ: 100 +SAVE_FREQ: 5 +TAG: simmim_finetune__swin_base__img224_window7__100ep \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/configs/swin_base__100ep/simmim_pretrain__swin_base__img192_window6__100ep.yaml b/PuzzleTuning/Counterpart PreTrain Methods/simmim/configs/swin_base__100ep/simmim_pretrain__swin_base__img192_window6__100ep.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b2ea0167781b6ecbb48028eba095a3f926b6123e --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/configs/swin_base__100ep/simmim_pretrain__swin_base__img192_window6__100ep.yaml @@ -0,0 +1,23 @@ +MODEL: + TYPE: swin + NAME: simmim_pretrain + DROP_PATH_RATE: 0.0 + SWIN: + EMBED_DIM: 128 + DEPTHS: [ 2, 2, 18, 2 ] + NUM_HEADS: [ 4, 8, 16, 32 ] + WINDOW_SIZE: 6 +DATA: + IMG_SIZE: 192 + MASK_PATCH_SIZE: 32 + MASK_RATIO: 0.6 +TRAIN: + EPOCHS: 100 + WARMUP_EPOCHS: 10 + BASE_LR: 2e-4 + WARMUP_LR: 1e-6 + MIN_LR: 1e-5 + WEIGHT_DECAY: 0.05 +PRINT_FREQ: 100 +SAVE_FREQ: 5 +TAG: simmim_pretrain__swin_base__img192_window6__100ep \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/configs/swin_base__800ep/simmim_finetune__swin_base__img224_window7__800ep.yaml b/PuzzleTuning/Counterpart PreTrain Methods/simmim/configs/swin_base__800ep/simmim_finetune__swin_base__img224_window7__800ep.yaml new file mode 100644 
index 0000000000000000000000000000000000000000..b13906708121f455586a6bddc5a259db4d82a894 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/configs/swin_base__800ep/simmim_finetune__swin_base__img224_window7__800ep.yaml @@ -0,0 +1,22 @@ +MODEL: + TYPE: swin + NAME: simmim_finetune + DROP_PATH_RATE: 0.1 + SWIN: + EMBED_DIM: 128 + DEPTHS: [ 2, 2, 18, 2 ] + NUM_HEADS: [ 4, 8, 16, 32 ] + WINDOW_SIZE: 7 +DATA: + IMG_SIZE: 224 +TRAIN: + EPOCHS: 100 + WARMUP_EPOCHS: 20 + BASE_LR: 1.25e-3 + WARMUP_LR: 2.5e-7 + MIN_LR: 2.5e-7 + WEIGHT_DECAY: 0.05 + LAYER_DECAY: 0.8 +PRINT_FREQ: 100 +SAVE_FREQ: 5 +TAG: simmim_finetune__swin_base__img224_window7__800ep \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/configs/swin_base__800ep/simmim_pretrain__swin_base__img192_window6__800ep.yaml b/PuzzleTuning/Counterpart PreTrain Methods/simmim/configs/swin_base__800ep/simmim_pretrain__swin_base__img192_window6__800ep.yaml new file mode 100644 index 0000000000000000000000000000000000000000..92da90aba803c0c268dc4cd896dac2c7c4b01dee --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/configs/swin_base__800ep/simmim_pretrain__swin_base__img192_window6__800ep.yaml @@ -0,0 +1,26 @@ +MODEL: + TYPE: swin + NAME: simmim_pretrain + DROP_PATH_RATE: 0.0 + SWIN: + EMBED_DIM: 128 + DEPTHS: [ 2, 2, 18, 2 ] + NUM_HEADS: [ 4, 8, 16, 32 ] + WINDOW_SIZE: 6 +DATA: + IMG_SIZE: 192 + MASK_PATCH_SIZE: 32 + MASK_RATIO: 0.6 +TRAIN: + EPOCHS: 800 + WARMUP_EPOCHS: 10 + BASE_LR: 1e-4 + WARMUP_LR: 5e-7 + WEIGHT_DECAY: 0.05 + LR_SCHEDULER: + NAME: 'multistep' + GAMMA: 0.1 + MULTISTEPS: [700,] +PRINT_FREQ: 100 +SAVE_FREQ: 5 +TAG: simmim_pretrain__swin_base__img192_window6__800ep \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/configs/swin_large__800ep/simmim_finetune__swin_large__img224_window14__800ep.yaml b/PuzzleTuning/Counterpart PreTrain Methods/simmim/configs/swin_large__800ep/simmim_finetune__swin_large__img224_window14__800ep.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3e805413dba928f4c96c90dda9ecacdc639809b3 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/configs/swin_large__800ep/simmim_finetune__swin_large__img224_window14__800ep.yaml @@ -0,0 +1,22 @@ +MODEL: + TYPE: swin + NAME: simmim_finetune + DROP_PATH_RATE: 0.2 + SWIN: + EMBED_DIM: 192 + DEPTHS: [ 2, 2, 18, 2 ] + NUM_HEADS: [ 6, 12, 24, 48 ] + WINDOW_SIZE: 14 +DATA: + IMG_SIZE: 224 +TRAIN: + EPOCHS: 100 + WARMUP_EPOCHS: 20 + BASE_LR: 1.25e-3 + WARMUP_LR: 2.5e-7 + MIN_LR: 2.5e-7 + WEIGHT_DECAY: 0.05 + LAYER_DECAY: 0.7 +PRINT_FREQ: 100 +SAVE_FREQ: 5 +TAG: simmim_finetune__swin_large__img224_window14__800ep \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/configs/swin_large__800ep/simmim_pretrain__swin_large__img192_window12__800ep.yaml b/PuzzleTuning/Counterpart PreTrain Methods/simmim/configs/swin_large__800ep/simmim_pretrain__swin_large__img192_window12__800ep.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5494530a91d586e1dd3de4aeb1229724b41966f5 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/configs/swin_large__800ep/simmim_pretrain__swin_large__img192_window12__800ep.yaml @@ -0,0 +1,26 @@ +MODEL: + TYPE: swin + NAME: simmim_pretrain + DROP_PATH_RATE: 0.0 + SWIN: + EMBED_DIM: 192 + DEPTHS: [ 2, 2, 18, 2 ] + NUM_HEADS: [ 6, 12, 24, 48 ] + WINDOW_SIZE: 12 +DATA: + IMG_SIZE: 192 + MASK_PATCH_SIZE: 32 + MASK_RATIO: 0.6 +TRAIN: 
+ EPOCHS: 800 + WARMUP_EPOCHS: 10 + BASE_LR: 1e-4 + WARMUP_LR: 5e-7 + WEIGHT_DECAY: 0.05 + LR_SCHEDULER: + NAME: 'multistep' + GAMMA: 0.1 + MULTISTEPS: [700,] +PRINT_FREQ: 100 +SAVE_FREQ: 5 +TAG: simmim_pretrain__swin_large__img192_window12__800ep \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/configs/vit_base__800ep/simmim_finetune__vit_base__img224__800ep.yaml b/PuzzleTuning/Counterpart PreTrain Methods/simmim/configs/vit_base__800ep/simmim_finetune__vit_base__img224__800ep.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6fcc5fc965e7ecc375dc10b37b98e665d487296f --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/configs/vit_base__800ep/simmim_finetune__vit_base__img224__800ep.yaml @@ -0,0 +1,25 @@ +MODEL: + TYPE: vit + NAME: simmim_finetune + DROP_PATH_RATE: 0.1 + VIT: + EMBED_DIM: 768 + DEPTH: 12 + NUM_HEADS: 12 + USE_APE: False + USE_RPB: True + USE_SHARED_RPB: False + USE_MEAN_POOLING: True +DATA: + IMG_SIZE: 224 +TRAIN: + EPOCHS: 100 + WARMUP_EPOCHS: 20 + BASE_LR: 1.25e-3 + WARMUP_LR: 2.5e-7 + MIN_LR: 2.5e-7 + WEIGHT_DECAY: 0.05 + LAYER_DECAY: 0.65 +PRINT_FREQ: 100 +SAVE_FREQ: 5 +TAG: simmim_finetune__vit_base__img224__800ep diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/configs/vit_base__800ep/simmim_pretrain__vit_base__img224__800ep.yaml b/PuzzleTuning/Counterpart PreTrain Methods/simmim/configs/vit_base__800ep/simmim_pretrain__vit_base__img224__800ep.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3ac2bbda547430b906793e45c8720f0c2ee7ffa3 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/configs/vit_base__800ep/simmim_pretrain__vit_base__img224__800ep.yaml @@ -0,0 +1,29 @@ +MODEL: + TYPE: vit + NAME: simmim_pretrain + DROP_PATH_RATE: 0.1 + VIT: + EMBED_DIM: 768 + DEPTH: 12 + NUM_HEADS: 12 + USE_APE: False + USE_RPB: False + USE_SHARED_RPB: True + USE_MEAN_POOLING: False +DATA: + IMG_SIZE: 224 + MASK_PATCH_SIZE: 32 + MASK_RATIO: 0.6 +TRAIN: + EPOCHS: 800 + WARMUP_EPOCHS: 10 + BASE_LR: 1e-4 + WARMUP_LR: 5e-7 + WEIGHT_DECAY: 0.05 + LR_SCHEDULER: + NAME: 'multistep' + GAMMA: 0.1 + MULTISTEPS: [700,] +PRINT_FREQ: 100 +SAVE_FREQ: 5 +TAG: simmim_pretrain__vit_base__img224__800ep diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/configs/vit_base__test/simmim_pretrain__vit_base__img224__100ep.yaml b/PuzzleTuning/Counterpart PreTrain Methods/simmim/configs/vit_base__test/simmim_pretrain__vit_base__img224__100ep.yaml new file mode 100644 index 0000000000000000000000000000000000000000..018ff7981f1b9be23e24975653758043e6ecb793 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/configs/vit_base__test/simmim_pretrain__vit_base__img224__100ep.yaml @@ -0,0 +1,31 @@ +MODEL: + TYPE: vit + NAME: simmim_pretrain + DROP_PATH_RATE: 0.0 + VIT: + EMBED_DIM: 768 + DEPTH: 12 + NUM_HEADS: 12 + USE_APE: True + USE_RPB: False + USE_SHARED_RPB: True + USE_MEAN_POOLING: False + QKV_BIAS: True + INIT_VALUES: None +DATA: + IMG_SIZE: 224 + MASK_PATCH_SIZE: 32 + MASK_RATIO: 0.6 +TRAIN: + EPOCHS: 200 + WARMUP_EPOCHS: 20 + BASE_LR: 2e-4 + WARMUP_LR: 1e-6 + WEIGHT_DECAY: 0.05 + LR_SCHEDULER: + NAME: 'multistep' + GAMMA: 0.1 + MULTISTEPS: [700,] +PRINT_FREQ: 500 +SAVE_FREQ: 20 +TAG: simmim_pretrain__vit_base__img224__800ep diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/data/__init__.py b/PuzzleTuning/Counterpart PreTrain Methods/simmim/data/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..95e540099ae2829a4580e84c069d021035afad9d --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/data/__init__.py @@ -0,0 +1,8 @@ +from .data_simmim import build_loader_simmim +from .data_finetune import build_loader_finetune + +def build_loader(config, logger, is_pretrain): + if is_pretrain: + return build_loader_simmim(config, logger) + else: + return build_loader_finetune(config, logger) \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/data/data_finetune.py b/PuzzleTuning/Counterpart PreTrain Methods/simmim/data/data_finetune.py new file mode 100644 index 0000000000000000000000000000000000000000..70d32195ecba14cd385acc81ee0f43ed57082e75 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/data/data_finetune.py @@ -0,0 +1,115 @@ +# -------------------------------------------------------- +# SimMIM +# Copyright (c) 2021 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Zhenda Xie +# -------------------------------------------------------- + +import os +import torch.distributed as dist +from torch.utils.data import DataLoader, DistributedSampler +from torchvision import datasets, transforms +from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.data import Mixup +from timm.data import create_transform +# from timm.data.transforms import _pil_interp +from timm.data.transforms import str_to_pil_interp + + +def build_loader_finetune(config, logger): + config.defrost() + dataset_train, config.MODEL.NUM_CLASSES = build_dataset(is_train=True, config=config, logger=logger) + config.freeze() + dataset_val, _ = build_dataset(is_train=False, config=config, logger=logger) + logger.info(f"Build dataset: train images = {len(dataset_train)}, val images = {len(dataset_val)}") + + num_tasks = dist.get_world_size() + global_rank = dist.get_rank() + sampler_train = DistributedSampler( + dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True + ) + sampler_val = DistributedSampler( + dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False + ) + + data_loader_train = DataLoader( + dataset_train, sampler=sampler_train, + batch_size=config.DATA.BATCH_SIZE, + num_workers=config.DATA.NUM_WORKERS, + pin_memory=config.DATA.PIN_MEMORY, + drop_last=True, + ) + + data_loader_val = DataLoader( + dataset_val, sampler=sampler_val, + batch_size=config.DATA.BATCH_SIZE, + num_workers=config.DATA.NUM_WORKERS, + pin_memory=config.DATA.PIN_MEMORY, + drop_last=False, + ) + + # setup mixup / cutmix + mixup_fn = None + mixup_active = config.AUG.MIXUP > 0 or config.AUG.CUTMIX > 0. 
or config.AUG.CUTMIX_MINMAX is not None + if mixup_active: + mixup_fn = Mixup( + mixup_alpha=config.AUG.MIXUP, cutmix_alpha=config.AUG.CUTMIX, cutmix_minmax=config.AUG.CUTMIX_MINMAX, + prob=config.AUG.MIXUP_PROB, switch_prob=config.AUG.MIXUP_SWITCH_PROB, mode=config.AUG.MIXUP_MODE, + label_smoothing=config.MODEL.LABEL_SMOOTHING, num_classes=config.MODEL.NUM_CLASSES) + + return dataset_train, dataset_val, data_loader_train, data_loader_val, mixup_fn + + +def build_dataset(is_train, config, logger): + transform = build_transform(is_train, config) + logger.info(f'Fine-tune data transform, is_train={is_train}:\n{transform}') + + if config.DATA.DATASET == 'imagenet': + prefix = 'train' if is_train else 'val' + root = os.path.join(config.DATA.DATA_PATH, prefix) + dataset = datasets.ImageFolder(root, transform=transform) + nb_classes = 1000 + else: + raise NotImplementedError("We only support ImageNet Now.") + + return dataset, nb_classes + + +def build_transform(is_train, config): + resize_im = config.DATA.IMG_SIZE > 32 + if is_train: + # this should always dispatch to transforms_imagenet_train + transform = create_transform( + input_size=config.DATA.IMG_SIZE, + is_training=True, + color_jitter=config.AUG.COLOR_JITTER if config.AUG.COLOR_JITTER > 0 else None, + auto_augment=config.AUG.AUTO_AUGMENT if config.AUG.AUTO_AUGMENT != 'none' else None, + re_prob=config.AUG.REPROB, + re_mode=config.AUG.REMODE, + re_count=config.AUG.RECOUNT, + interpolation=config.DATA.INTERPOLATION, + ) + if not resize_im: + # replace RandomResizedCropAndInterpolation with + # RandomCrop + transform.transforms[0] = transforms.RandomCrop(config.DATA.IMG_SIZE, padding=4) + return transform + + t = [] + if resize_im: + if config.TEST.CROP: + size = int((256 / 224) * config.DATA.IMG_SIZE) + t.append( + transforms.Resize(size, interpolation=str_to_pil_interp(config.DATA.INTERPOLATION)), + # to maintain same ratio w.r.t. 
224 images + ) + t.append(transforms.CenterCrop(config.DATA.IMG_SIZE)) + else: + t.append( + transforms.Resize((config.DATA.IMG_SIZE, config.DATA.IMG_SIZE), + interpolation=str_to_pil_interp(config.DATA.INTERPOLATION)) + ) + + t.append(transforms.ToTensor()) + t.append(transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)) + return transforms.Compose(t) \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/data/data_simmim.py b/PuzzleTuning/Counterpart PreTrain Methods/simmim/data/data_simmim.py new file mode 100644 index 0000000000000000000000000000000000000000..89f468f42641e75cbcd01d51c55fd3d5efda7313 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/data/data_simmim.py @@ -0,0 +1,104 @@ +# -------------------------------------------------------- +# SimMIM +# Copyright (c) 2021 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Zhenda Xie +# -------------------------------------------------------- + +import math +import random +import numpy as np + +import torch +import torch.distributed as dist +import torchvision.transforms as T +from torch.utils.data import DataLoader, DistributedSampler +from torch.utils.data._utils.collate import default_collate +from torchvision.datasets import ImageFolder +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD + + +class MaskGenerator: + def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6): + self.input_size = input_size + self.mask_patch_size = mask_patch_size + self.model_patch_size = model_patch_size + self.mask_ratio = mask_ratio + + assert self.input_size % self.mask_patch_size == 0 + assert self.mask_patch_size % self.model_patch_size == 0 + + self.rand_size = self.input_size // self.mask_patch_size + self.scale = self.mask_patch_size // self.model_patch_size + + self.token_count = self.rand_size ** 2 + self.mask_count = int(np.ceil(self.token_count * self.mask_ratio)) + + def __call__(self): + mask_idx = np.random.permutation(self.token_count)[:self.mask_count] + mask = np.zeros(self.token_count, dtype=int) + mask[mask_idx] = 1 + + mask = mask.reshape((self.rand_size, self.rand_size)) + mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1) + + return mask + + +class SimMIMTransform: + def __init__(self, config): + self.transform_img = T.Compose([ + T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img), + T.RandomResizedCrop(config.DATA.IMG_SIZE, scale=(0.67, 1.), ratio=(3. / 4., 4. 
/ 3.)), + T.RandomHorizontalFlip(), + T.ToTensor(), + T.Normalize(mean=torch.tensor(IMAGENET_DEFAULT_MEAN),std=torch.tensor(IMAGENET_DEFAULT_STD)), + ]) + + if config.MODEL.TYPE == 'swin': + model_patch_size=config.MODEL.SWIN.PATCH_SIZE + elif config.MODEL.TYPE == 'vit': + model_patch_size=config.MODEL.VIT.PATCH_SIZE + else: + raise NotImplementedError + + self.mask_generator = MaskGenerator( + input_size=config.DATA.IMG_SIZE, + mask_patch_size=config.DATA.MASK_PATCH_SIZE, + model_patch_size=model_patch_size, + mask_ratio=config.DATA.MASK_RATIO, + ) + + def __call__(self, img): + img = self.transform_img(img) + mask = self.mask_generator() + + return img, mask + + +def collate_fn(batch): + if not isinstance(batch[0][0], tuple): + return default_collate(batch) + else: + batch_num = len(batch) + ret = [] + for item_idx in range(len(batch[0][0])): + if batch[0][0][item_idx] is None: + ret.append(None) + else: + ret.append(default_collate([batch[i][0][item_idx] for i in range(batch_num)])) + ret.append(default_collate([batch[i][1] for i in range(batch_num)])) + return ret + + +def build_loader_simmim(config, logger): + transform = SimMIMTransform(config) + logger.info(f'Pre-train data transform:\n{transform}') + + dataset = ImageFolder(config.DATA.DATA_PATH, transform) + logger.info(f'Build dataset: train images = {len(dataset)}') + + sampler = DistributedSampler(dataset, num_replicas=dist.get_world_size(), rank=dist.get_rank(), shuffle=True) + dataloader = DataLoader(dataset, config.DATA.BATCH_SIZE, sampler=sampler, num_workers=config.DATA.NUM_WORKERS, pin_memory=True, drop_last=True, collate_fn=collate_fn) + + return dataloader \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/figures/teaser.jpg b/PuzzleTuning/Counterpart PreTrain Methods/simmim/figures/teaser.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3e116f28d33aad4a72f4a42f1b1e57b065fe1bca Binary files /dev/null and b/PuzzleTuning/Counterpart PreTrain Methods/simmim/figures/teaser.jpg differ diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/load_test.ipynb b/PuzzleTuning/Counterpart PreTrain Methods/simmim/load_test.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..1aa43e326f7e6c0ddc6f5a2c8092381687de8ec0 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/load_test.ipynb @@ -0,0 +1,61 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/root/miniconda3/envs/SimMIM/lib/python3.9/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Saving backbone init weight to ./model_base...\n" + ] + } + ], + "source": [ + "from load_vit_from_ckpt import gen_basic_weight\n", + "import os\n", + "\n", + "\n", + "base_weight_pth = './model_base'\n", + "gen_basic_weight(base_weight_pth)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "SimMIM", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.16" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "be8d61690b9c27505fb56c69c6c249490f4cb538c6e1d60f116d2e57d82ff881" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/load_vit_from_ckpt.py b/PuzzleTuning/Counterpart PreTrain Methods/simmim/load_vit_from_ckpt.py new file mode 100644 index 0000000000000000000000000000000000000000..6fcb2a1a271b45de62d079bf28da92bb608a427e --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/load_vit_from_ckpt.py @@ -0,0 +1,122 @@ +""" +Extracting backbone from a specified SimMIM checkpoint. + +Example: + +python load_vit_from_ckpt.py \ + --checkpoint ./output/simmim_pretrain/vit_run/ckpt_epoch_99.pth \ + --save-to ./output/models/ \ + --save-name vit_simmim_16_224.pth \ + --num-classes 2 +""" + +import torchvision +import torch +import os +import argparse +from timm import create_model +# from net.models.vit import VisionTransformer + + +def gen_basic_weight(save_dir): + # Load timm vit weight + model = create_model('vit_base_patch16_224', pretrained=False, in_chans=3) + random_state_dict = model.state_dict() + + model = create_model('vit_base_patch16_224', pretrained=True, in_chans=3) + pretrained_state_dict = model.state_dict() + + # Save model + print(f'Saving backbone init weight to {save_dir}...') + if not os.path.exists(save_dir): + os.makedirs(save_dir) + torch.save(random_state_dict, os.path.join(save_dir, 'ViT_b16_224_Random_Init.pth')) + torch.save(pretrained_state_dict, os.path.join(save_dir, 'ViT_b16_224_Imagenet.pth')) + + +def main(args): + """Read ViT parameters from a SimMIM checkpoint backbone + """ + + # Initialize model + if args.basic_weight: + model = create_model('vit_base_patch16_224', pretrained=False, in_chans=3) + # model = VisionTransformer(num_classes=args.num_classes) + + # Load basic weights (default initial parameters) + basic_weight = torch.load(args.basic_weight) + model.load_state_dict(basic_weight, strict=False) + else: + raise ValueError('--basic-weight must be provided to initialize the model') + # model = create_model('vit_base_patch16_224', pretrained=True, in_chans=3) + + # Load checkpoint + # state_dict = torch.load(args.checkpoint)['state_dict'] + checkpoint = torch.load(args.checkpoint) + ckp_state_dict = checkpoint['model'] + model_state_dict = model.state_dict() + + print('checking checkpoint weights...') + len_state_dict = len(ckp_state_dict) + for seq, src_k in enumerate(ckp_state_dict.keys()): + if "encoder."
in src_k: + tgt_k = str(src_k).replace("encoder.", "") + if tgt_k not in model_state_dict.keys(): + print(f'{seq+1}/{len_state_dict} Skipped: {src_k}, {ckp_state_dict[src_k].shape}') + + print('loading weights...') + len_state_dict = len(model_state_dict) + for seq, tgt_k in enumerate(model_state_dict.keys()): + src_k = "encoder." + str(tgt_k) + if src_k in ckp_state_dict: + model_state_dict[tgt_k] = ckp_state_dict[src_k] + else: + print(f'{seq+1}/{len_state_dict} Skipped: {tgt_k}') + + model.load_state_dict(model_state_dict, strict=False) + + # Save model + print(f'Saving model to {args.save_to}...') + if not os.path.exists(args.save_to): + os.makedirs(args.save_to) + torch.save(model.state_dict(), os.path.join(args.save_to, args.save_name)) + + +def get_args_parser(): + """Input parameters + """ + parser = argparse.ArgumentParser(description='Extract backbone state dict') + parser.add_argument('--checkpoint', default='./checkpoint_0004.pth.tar', type=str, required=True, + help='Path to the checkpoint') + parser.add_argument('--save-to', default='./output', type=str, required=True, + help='Where to save the model') + parser.add_argument('--save-name', default='vit_simmim_16_224.pth', type=str, required=True, + help='Model save name') + parser.add_argument('--num-classes', default=2, type=int, + help='Number of classes to be classified') + parser.add_argument('--random-seed', default=42, type=int, + help='Random seed (enable reproduction)') + parser.add_argument('--basic-weight', default='', type=str, + help='Basic weight (used to init parameters)') + return parser + + +def setup_seed(seed): + """Fix up the random seed + + Args: + seed (int): Seed to be applied + """ + import random + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + random.seed(seed) + torch.backends.cudnn.deterministic = True + + +if __name__ == '__main__': + parser = get_args_parser() + args = parser.parse_args() + + setup_seed(args.random_seed) + main(args) \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/logger.py b/PuzzleTuning/Counterpart PreTrain Methods/simmim/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..f7d95b21648bd00a57ff7d0107064d425308a580 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/logger.py @@ -0,0 +1,42 @@ +# -------------------------------------------------------- +# SimMIM +# Copyright (c) 2021 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Ze Liu +# Modified by Zhenda Xie +# -------------------------------------------------------- + +import os +import sys +import logging +import functools +from termcolor import colored + + +@functools.lru_cache() +def create_logger(output_dir, dist_rank=0, name=''): + # create logger + logger = logging.getLogger(name) + logger.setLevel(logging.DEBUG) + logger.propagate = False + + # create formatter + fmt = '[%(asctime)s %(name)s] (%(filename)s %(lineno)d): %(levelname)s %(message)s' + color_fmt = colored('[%(asctime)s %(name)s]', 'green') + \ + colored('(%(filename)s %(lineno)d)', 'yellow') + ': %(levelname)s %(message)s' + + # create console handlers for master process + if dist_rank == 0: + console_handler = logging.StreamHandler(sys.stdout) + console_handler.setLevel(logging.DEBUG) + console_handler.setFormatter( + logging.Formatter(fmt=color_fmt, datefmt='%Y-%m-%d %H:%M:%S')) + logger.addHandler(console_handler) + + # create file handlers + file_handler = logging.FileHandler(os.path.join(output_dir, 
f'log_rank{dist_rank}.txt'), mode='a') + file_handler.setLevel(logging.DEBUG) + file_handler.setFormatter(logging.Formatter(fmt=fmt, datefmt='%Y-%m-%d %H:%M:%S')) + logger.addHandler(file_handler) + + return logger diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/lr_scheduler.py b/PuzzleTuning/Counterpart PreTrain Methods/simmim/lr_scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..ee27b8cd5867c849e1f2d9eead752c49990fcb55 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/lr_scheduler.py @@ -0,0 +1,153 @@ +# -------------------------------------------------------- +# SimMIM +# Copyright (c) 2021 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Ze Liu +# Modified by Zhenda Xie +# -------------------------------------------------------- + +from collections import Counter +from bisect import bisect_right + +import torch +from timm.scheduler.cosine_lr import CosineLRScheduler +from timm.scheduler.step_lr import StepLRScheduler +from timm.scheduler.scheduler import Scheduler + + +def build_scheduler(config, optimizer, n_iter_per_epoch): + num_steps = int(config.TRAIN.EPOCHS * n_iter_per_epoch) + warmup_steps = int(config.TRAIN.WARMUP_EPOCHS * n_iter_per_epoch) + decay_steps = int(config.TRAIN.LR_SCHEDULER.DECAY_EPOCHS * n_iter_per_epoch) + multi_steps = [i * n_iter_per_epoch for i in config.TRAIN.LR_SCHEDULER.MULTISTEPS] + + lr_scheduler = None + if config.TRAIN.LR_SCHEDULER.NAME == 'cosine': + lr_scheduler = CosineLRScheduler( + optimizer, + t_initial=num_steps, + t_mul=1., + lr_min=config.TRAIN.MIN_LR, + warmup_lr_init=config.TRAIN.WARMUP_LR, + warmup_t=warmup_steps, + cycle_limit=1, + t_in_epochs=False, + ) + elif config.TRAIN.LR_SCHEDULER.NAME == 'linear': + lr_scheduler = LinearLRScheduler( + optimizer, + t_initial=num_steps, + lr_min_rate=0.01, + warmup_lr_init=config.TRAIN.WARMUP_LR, + warmup_t=warmup_steps, + t_in_epochs=False, + ) + elif config.TRAIN.LR_SCHEDULER.NAME == 'step': + lr_scheduler = StepLRScheduler( + optimizer, + decay_t=decay_steps, + decay_rate=config.TRAIN.LR_SCHEDULER.DECAY_RATE, + warmup_lr_init=config.TRAIN.WARMUP_LR, + warmup_t=warmup_steps, + t_in_epochs=False, + ) + elif config.TRAIN.LR_SCHEDULER.NAME == 'multistep': + lr_scheduler = MultiStepLRScheduler( + optimizer, + milestones=multi_steps, + gamma=config.TRAIN.LR_SCHEDULER.GAMMA, + warmup_lr_init=config.TRAIN.WARMUP_LR, + warmup_t=warmup_steps, + t_in_epochs=False, + ) + + return lr_scheduler + + +class LinearLRScheduler(Scheduler): + def __init__(self, + optimizer: torch.optim.Optimizer, + t_initial: int, + lr_min_rate: float, + warmup_t=0, + warmup_lr_init=0., + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + initialize=True, + ) -> None: + super().__init__( + optimizer, param_group_field="lr", + noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, + initialize=initialize) + + self.t_initial = t_initial + self.lr_min_rate = lr_min_rate + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.t_in_epochs = t_in_epochs + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def _get_lr(self, t): + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + t = t - self.warmup_t + total_t = self.t_initial - self.warmup_t 
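+            # Editorial note (not upstream code): the comprehension below linearly
+            # anneals each base lr v towards v * lr_min_rate after warmup,
+            #   lr(t) = v * (1 - (1 - lr_min_rate) * t / total_t),
+            # so lr(0) = v right after warmup and lr(total_t) = v * lr_min_rate.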
+ lrs = [v - ((v - v * self.lr_min_rate) * (t / total_t)) for v in self.base_values] + return lrs + + def get_epoch_values(self, epoch: int): + if self.t_in_epochs: + return self._get_lr(epoch) + else: + return None + + def get_update_values(self, num_updates: int): + if not self.t_in_epochs: + return self._get_lr(num_updates) + else: + return None + + +class MultiStepLRScheduler(Scheduler): + def __init__(self, optimizer: torch.optim.Optimizer, milestones, gamma=0.1, warmup_t=0, warmup_lr_init=0, t_in_epochs=True) -> None: + super().__init__(optimizer, param_group_field="lr") + + self.milestones = milestones + self.gamma = gamma + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.t_in_epochs = t_in_epochs + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + assert self.warmup_t <= min(self.milestones) + + def _get_lr(self, t): + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + lrs = [v * (self.gamma ** bisect_right(self.milestones, t)) for v in self.base_values] + return lrs + + def get_epoch_values(self, epoch: int): + if self.t_in_epochs: + return self._get_lr(epoch) + else: + return None + + def get_update_values(self, num_updates: int): + if not self.t_in_epochs: + return self._get_lr(num_updates) + else: + return None \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/main_finetune.py b/PuzzleTuning/Counterpart PreTrain Methods/simmim/main_finetune.py new file mode 100644 index 0000000000000000000000000000000000000000..3ca947a9f020e4fb2b7ae4ab89bb16c8bfb3af08 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/main_finetune.py @@ -0,0 +1,348 @@ +# -------------------------------------------------------- +# SimMIM +# Copyright (c) 2021 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Ze Liu +# Modified by Zhenda Xie +# -------------------------------------------------------- + +import os +import time +import argparse +import datetime +import numpy as np + +import torch +import torch.backends.cudnn as cudnn +import torch.distributed as dist + +from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy +from timm.utils import accuracy, AverageMeter + +from config import get_config +from models import build_model +from data import build_loader +from lr_scheduler import build_scheduler +from optimizer import build_optimizer +from logger import create_logger +from utils import load_checkpoint, load_pretrained, save_checkpoint, get_grad_norm, auto_resume_helper, reduce_tensor + +try: + # noinspection PyUnresolvedReferences + from apex import amp +except ImportError: + amp = None + + +def parse_option(): + parser = argparse.ArgumentParser('Swin Transformer training and evaluation script', add_help=False) + parser.add_argument('--cfg', type=str, required=True, metavar="FILE", help='path to config file', ) + parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. 
", + default=None, + nargs='+', + ) + + # easy config modification + parser.add_argument('--batch-size', type=int, help="batch size for single GPU") + parser.add_argument('--data-path', type=str, help='path to dataset') + parser.add_argument('--pretrained', type=str, help='path to pre-trained model') + parser.add_argument('--resume', help='resume from checkpoint') + parser.add_argument('--accumulation-steps', type=int, help="gradient accumulation steps") + parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") + parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') + parser.add_argument('--output', default='output', type=str, metavar='PATH', + help='root of output folder, the full path is // (default: output)') + parser.add_argument('--tag', help='tag of experiment') + parser.add_argument('--eval', action='store_true', help='Perform evaluation only') + parser.add_argument('--throughput', action='store_true', help='Test throughput only') + + # distributed training + parser.add_argument("--local_rank", type=int, required=True, help='local rank for DistributedDataParallel') + + args = parser.parse_args() + + config = get_config(args) + + return args, config + + +def main(config): + dataset_train, dataset_val, data_loader_train, data_loader_val, mixup_fn = build_loader(config, logger, is_pretrain=False) + + logger.info(f"Creating model:{config.MODEL.TYPE}/{config.MODEL.NAME}") + model = build_model(config, is_pretrain=False) + model.cuda() + logger.info(str(model)) + + optimizer = build_optimizer(config, model, logger, is_pretrain=False) + if config.AMP_OPT_LEVEL != "O0": + model, optimizer = amp.initialize(model, optimizer, opt_level=config.AMP_OPT_LEVEL) + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[config.LOCAL_RANK], broadcast_buffers=False) + model_without_ddp = model.module + + n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) + logger.info(f"number of params: {n_parameters}") + if hasattr(model_without_ddp, 'flops'): + flops = model_without_ddp.flops() + logger.info(f"number of GFLOPs: {flops / 1e9}") + + lr_scheduler = build_scheduler(config, optimizer, len(data_loader_train)) + + if config.AUG.MIXUP > 0.: + # smoothing is handled with mixup label transform + criterion = SoftTargetCrossEntropy() + elif config.MODEL.LABEL_SMOOTHING > 0.: + criterion = LabelSmoothingCrossEntropy(smoothing=config.MODEL.LABEL_SMOOTHING) + else: + criterion = torch.nn.CrossEntropyLoss() + + max_accuracy = 0.0 + + if config.TRAIN.AUTO_RESUME: + resume_file = auto_resume_helper(config.OUTPUT, logger) + if resume_file: + if config.MODEL.RESUME: + logger.warning(f"auto-resume changing resume file from {config.MODEL.RESUME} to {resume_file}") + config.defrost() + config.MODEL.RESUME = resume_file + config.freeze() + logger.info(f'auto resuming from {resume_file}') + else: + logger.info(f'no checkpoint found in {config.OUTPUT}, ignoring auto resume') + + if config.MODEL.RESUME: + max_accuracy = load_checkpoint(config, model_without_ddp, optimizer, lr_scheduler, logger) + acc1, acc5, loss = validate(config, data_loader_val, model) + logger.info(f"Accuracy of the network on the {len(dataset_val)} test images: {acc1:.1f}%") + if config.EVAL_MODE: + return + elif config.PRETRAINED: + load_pretrained(config, model_without_ddp, logger) + + if config.THROUGHPUT_MODE: + throughput(data_loader_val, model, 
logger) + return + + logger.info("Start training") + start_time = time.time() + for epoch in range(config.TRAIN.START_EPOCH, config.TRAIN.EPOCHS): + data_loader_train.sampler.set_epoch(epoch) + + train_one_epoch(config, model, criterion, data_loader_train, optimizer, epoch, mixup_fn, lr_scheduler) + if dist.get_rank() == 0 and (epoch % config.SAVE_FREQ == 0 or epoch == (config.TRAIN.EPOCHS - 1)): + save_checkpoint(config, epoch, model_without_ddp, max_accuracy, optimizer, lr_scheduler, logger) + + acc1, acc5, loss = validate(config, data_loader_val, model) + logger.info(f"Accuracy of the network on the {len(dataset_val)} test images: {acc1:.1f}%") + max_accuracy = max(max_accuracy, acc1) + logger.info(f'Max accuracy: {max_accuracy:.2f}%') + + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + logger.info('Training time {}'.format(total_time_str)) + + +def train_one_epoch(config, model, criterion, data_loader, optimizer, epoch, mixup_fn, lr_scheduler): + model.train() + optimizer.zero_grad() + + logger.info(f'Current learning rate for different parameter groups: {[it["lr"] for it in optimizer.param_groups]}') + + num_steps = len(data_loader) + batch_time = AverageMeter() + loss_meter = AverageMeter() + norm_meter = AverageMeter() + + start = time.time() + end = time.time() + for idx, (samples, targets) in enumerate(data_loader): + samples = samples.cuda(non_blocking=True) + targets = targets.cuda(non_blocking=True) + + if mixup_fn is not None: + samples, targets = mixup_fn(samples, targets) + + outputs = model(samples) + + if config.TRAIN.ACCUMULATION_STEPS > 1: + loss = criterion(outputs, targets) + loss = loss / config.TRAIN.ACCUMULATION_STEPS + if config.AMP_OPT_LEVEL != "O0": + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() + if config.TRAIN.CLIP_GRAD: + grad_norm = torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), config.TRAIN.CLIP_GRAD) + else: + grad_norm = get_grad_norm(amp.master_params(optimizer)) + else: + loss.backward() + if config.TRAIN.CLIP_GRAD: + grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), config.TRAIN.CLIP_GRAD) + else: + grad_norm = get_grad_norm(model.parameters()) + if (idx + 1) % config.TRAIN.ACCUMULATION_STEPS == 0: + optimizer.step() + optimizer.zero_grad() + lr_scheduler.step_update(epoch * num_steps + idx) + else: + loss = criterion(outputs, targets) + optimizer.zero_grad() + if config.AMP_OPT_LEVEL != "O0": + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() + if config.TRAIN.CLIP_GRAD: + grad_norm = torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), config.TRAIN.CLIP_GRAD) + else: + grad_norm = get_grad_norm(amp.master_params(optimizer)) + else: + loss.backward() + if config.TRAIN.CLIP_GRAD: + grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), config.TRAIN.CLIP_GRAD) + else: + grad_norm = get_grad_norm(model.parameters()) + optimizer.step() + lr_scheduler.step_update(epoch * num_steps + idx) + + torch.cuda.synchronize() + + loss_meter.update(loss.item(), targets.size(0)) + norm_meter.update(grad_norm) + batch_time.update(time.time() - end) + end = time.time() + + if idx % config.PRINT_FREQ == 0: + lr = optimizer.param_groups[-1]['lr'] + memory_used = torch.cuda.max_memory_allocated() / (1024.0 * 1024.0) + etas = batch_time.avg * (num_steps - idx) + logger.info( + f'Train: [{epoch}/{config.TRAIN.EPOCHS}][{idx}/{num_steps}]\t' + f'eta {datetime.timedelta(seconds=int(etas))} lr {lr:.6f}\t' + f'time 
{batch_time.val:.4f} ({batch_time.avg:.4f})\t' + f'loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t' + f'grad_norm {norm_meter.val:.4f} ({norm_meter.avg:.4f})\t' + f'mem {memory_used:.0f}MB') + epoch_time = time.time() - start + logger.info(f"EPOCH {epoch} training takes {datetime.timedelta(seconds=int(epoch_time))}") + + +@torch.no_grad() +def validate(config, data_loader, model): + criterion = torch.nn.CrossEntropyLoss() + model.eval() + + batch_time = AverageMeter() + loss_meter = AverageMeter() + acc1_meter = AverageMeter() + acc5_meter = AverageMeter() + + end = time.time() + for idx, (images, target) in enumerate(data_loader): + images = images.cuda(non_blocking=True) + target = target.cuda(non_blocking=True) + + # compute output + output = model(images) + + # measure accuracy and record loss + loss = criterion(output, target) + acc1, acc5 = accuracy(output, target, topk=(1, 5)) + + acc1 = reduce_tensor(acc1) + acc5 = reduce_tensor(acc5) + loss = reduce_tensor(loss) + + loss_meter.update(loss.item(), target.size(0)) + acc1_meter.update(acc1.item(), target.size(0)) + acc5_meter.update(acc5.item(), target.size(0)) + + # measure elapsed time + batch_time.update(time.time() - end) + end = time.time() + + if idx % config.PRINT_FREQ == 0: + memory_used = torch.cuda.max_memory_allocated() / (1024.0 * 1024.0) + logger.info( + f'Test: [{idx}/{len(data_loader)}]\t' + f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' + f'Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t' + f'Acc@1 {acc1_meter.val:.3f} ({acc1_meter.avg:.3f})\t' + f'Acc@5 {acc5_meter.val:.3f} ({acc5_meter.avg:.3f})\t' + f'Mem {memory_used:.0f}MB') + logger.info(f' * Acc@1 {acc1_meter.avg:.3f} Acc@5 {acc5_meter.avg:.3f}') + return acc1_meter.avg, acc5_meter.avg, loss_meter.avg + + +@torch.no_grad() +def throughput(data_loader, model, logger): + model.eval() + + for idx, (images, _) in enumerate(data_loader): + images = images.cuda(non_blocking=True) + batch_size = images.shape[0] + for i in range(50): + model(images) + torch.cuda.synchronize() + logger.info("throughput averaged over 30 runs") + tic1 = time.time() + for i in range(30): + model(images) + torch.cuda.synchronize() + tic2 = time.time() + logger.info(f"batch_size {batch_size} throughput {30 * batch_size / (tic2 - tic1)}") + return + + +if __name__ == '__main__': + _, config = parse_option() + + if config.AMP_OPT_LEVEL != "O0": + assert amp is not None, "amp not installed!"
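+    # Hypothetical launch example (editor's sketch, mirroring the pre-training
+    # docstring in main_simmim.py): torch.distributed.launch sets RANK and
+    # WORLD_SIZE in the environment and passes --local_rank to each process:
+    #
+    #   python -m torch.distributed.launch --nproc_per_node 2 main_finetune.py \
+    #       --cfg <finetune-config>.yaml --data-path <imagenet-root> \
+    #       --batch-size 64 --output ./output --tag ft_run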
+ + if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: + rank = int(os.environ["RANK"]) + world_size = int(os.environ['WORLD_SIZE']) + print(f"RANK and WORLD_SIZE in environ: {rank}/{world_size}") + else: + rank = -1 + world_size = -1 + torch.cuda.set_device(config.LOCAL_RANK) + torch.distributed.init_process_group(backend='nccl', init_method='env://', world_size=world_size, rank=rank) + torch.distributed.barrier() + + seed = config.SEED + dist.get_rank() + torch.manual_seed(seed) + np.random.seed(seed) + cudnn.benchmark = True + + # linear scale the learning rate according to total batch size, may not be optimal + linear_scaled_lr = config.TRAIN.BASE_LR * config.DATA.BATCH_SIZE * dist.get_world_size() / 512.0 + linear_scaled_warmup_lr = config.TRAIN.WARMUP_LR * config.DATA.BATCH_SIZE * dist.get_world_size() / 512.0 + linear_scaled_min_lr = config.TRAIN.MIN_LR * config.DATA.BATCH_SIZE * dist.get_world_size() / 512.0 + # gradient accumulation also need to scale the learning rate + if config.TRAIN.ACCUMULATION_STEPS > 1: + linear_scaled_lr = linear_scaled_lr * config.TRAIN.ACCUMULATION_STEPS + linear_scaled_warmup_lr = linear_scaled_warmup_lr * config.TRAIN.ACCUMULATION_STEPS + linear_scaled_min_lr = linear_scaled_min_lr * config.TRAIN.ACCUMULATION_STEPS + config.defrost() + config.TRAIN.BASE_LR = linear_scaled_lr + config.TRAIN.WARMUP_LR = linear_scaled_warmup_lr + config.TRAIN.MIN_LR = linear_scaled_min_lr + config.freeze() + + os.makedirs(config.OUTPUT, exist_ok=True) + logger = create_logger(output_dir=config.OUTPUT, dist_rank=dist.get_rank(), name=f"{config.MODEL.NAME}") + + if dist.get_rank() == 0: + path = os.path.join(config.OUTPUT, "config.json") + with open(path, "w") as f: + f.write(config.dump()) + logger.info(f"Full config saved to {path}") + + # print config + logger.info(config.dump()) + + main(config) diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/main_simmim.py b/PuzzleTuning/Counterpart PreTrain Methods/simmim/main_simmim.py new file mode 100644 index 0000000000000000000000000000000000000000..c87dfd993ec2726f05e7e86325cac158c3326db0 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/main_simmim.py @@ -0,0 +1,283 @@ +# -------------------------------------------------------- +# SimMIM +# Copyright (c) 2021 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Ze Liu +# Modified by Zhenda Xie +# -------------------------------------------------------- + + +"""_summary_ + +Test code: + +simmim_pretrain__vit_base__img224__800ep.yaml + +python -m torch.distributed.launch \ + --nproc_per_node 3 \ + main_simmim.py \ + --cfg ./configs/vit_base__800ep/simmim_pretrain__vit_base__img224__800ep.yaml \ + --data-path /data/imagenet_ILSVRC/ILSVRC/Data/CLS-LOC/train \ + --batch-size 128 \ + --output ./output \ + --tag test_run \ + --amp-opt-level O0 +""" + +import os +import time +import argparse +import datetime +import numpy as np + +import torch +import torch.backends.cudnn as cudnn +import torch.distributed as dist +from timm.utils import AverageMeter, accuracy + +from config import get_config +from models import build_model +from data import build_loader +from lr_scheduler import build_scheduler +from optimizer import build_optimizer +from logger import create_logger +from utils import load_checkpoint, save_checkpoint, get_grad_norm, auto_resume_helper + + +from torch.utils.tensorboard import SummaryWriter + +try: + # noinspection PyUnresolvedReferences + # from apex import amp + import torch.cuda.amp as amp + from 
torch.cuda.amp import autocast as autocast +except ImportError: + amp = None + +# fixme: fix cpu number here! +os.environ["OMP_NUM_THREADS"] = "64" + + +def parse_option(): + parser = argparse.ArgumentParser('SimMIM pre-training script', add_help=False) + parser.add_argument('--cfg', type=str, required=True, metavar="FILE", help='path to config file', ) + parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. ", + default=None, + nargs='+', + ) + + # easy config modification + parser.add_argument('--batch-size', type=int, help="batch size for single GPU") + parser.add_argument('--data-path', type=str, help='path to dataset') + parser.add_argument('--resume', help='resume from checkpoint') + parser.add_argument('--accumulation-steps', type=int, help="gradient accumulation steps") + parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") + parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') + parser.add_argument('--output', default='output', type=str, metavar='PATH', + help='root of output folder, the full path is <output>/<model_name>/<tag> (default: output)') + parser.add_argument('--tag', help='tag of experiment') + + # distributed training + parser.add_argument("--local-rank", type=int, required=True, help='local rank for DistributedDataParallel') + + # others + parser.add_argument('--load-weight', type=str, help='Path to init model weight (only applicable for vit model)') + + parser.add_argument('--log_dir', default='./runs', + help='path where to save the tensorboard log') + + args = parser.parse_args() + + config = get_config(args) + + return args, config + + +def main(config, args): + data_loader_train = build_loader(config, logger, is_pretrain=True) + + logger.info(f"Creating model:{config.MODEL.TYPE}/{config.MODEL.NAME}") + model = build_model(config, is_pretrain=True, load_weight=args.load_weight) + model.cuda() + logger.info(str(model)) + + optimizer = build_optimizer(config, model, logger, is_pretrain=True) + + # Modified: use a GradScaler for AMP + assert config.AMP_OPT_LEVEL == "O1", "Only amp opt level O1 is supported!"
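+    # Editor's sketch of the native-AMP flow used below (standard torch.cuda.amp
+    # API, replacing the apex path kept commented out): the forward pass runs
+    # under autocast(), then
+    #   scaler.scale(loss).backward()
+    #   scaler.step(optimizer)
+    #   scaler.update()
+    # take the place of a plain loss.backward() / optimizer.step().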
+ scaler = torch.cuda.amp.GradScaler() + + # if config.AMP_OPT_LEVEL != "O0": + # model, optimizer = amp.initialize(model, optimizer, opt_level=config.AMP_OPT_LEVEL) + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[config.LOCAL_RANK], broadcast_buffers=False) + model_without_ddp = model.module + + n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) + logger.info(f"number of params: {n_parameters}") + if hasattr(model_without_ddp, 'flops'): + flops = model_without_ddp.flops() + logger.info(f"number of GFLOPs: {flops / 1e9}") + + lr_scheduler = build_scheduler(config, optimizer, len(data_loader_train)) + + if config.TRAIN.AUTO_RESUME: + resume_file = auto_resume_helper(config.OUTPUT, logger) + if resume_file: + if config.MODEL.RESUME: + logger.warning(f"auto-resume changing resume file from {config.MODEL.RESUME} to {resume_file}") + config.defrost() + config.MODEL.RESUME = resume_file + config.freeze() + logger.info(f'auto resuming from {resume_file}') + else: + logger.info(f'no checkpoint found in {config.OUTPUT}, ignoring auto resume') + + if config.MODEL.RESUME: + load_checkpoint(config, model_without_ddp, optimizer, lr_scheduler, logger) + + + writer = SummaryWriter(log_dir=args.log_dir) + + logger.info("Start training") + start_time = time.time() + for epoch in range(config.TRAIN.START_EPOCH, config.TRAIN.EPOCHS): + data_loader_train.sampler.set_epoch(epoch) + + train_one_epoch(config, model, data_loader_train, optimizer, epoch, lr_scheduler, scaler, writer) + if dist.get_rank() == 0 and (epoch % config.SAVE_FREQ == 0 or epoch == (config.TRAIN.EPOCHS - 1)): + save_checkpoint(config, epoch, model_without_ddp, 0., optimizer, lr_scheduler, logger) + + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + logger.info('Training time {}'.format(total_time_str)) + + +def train_one_epoch(config, model, data_loader, optimizer, epoch, lr_scheduler, scaler, writer): + model.train() + optimizer.zero_grad() + + num_steps = len(data_loader) + batch_time = AverageMeter() + loss_meter = AverageMeter() + norm_meter = AverageMeter() + + start = time.time() + end = time.time() + for idx, (img, mask, _) in enumerate(data_loader): + img = img.cuda(non_blocking=True) + mask = mask.cuda(non_blocking=True) + + # Modified: Use autocast + with autocast(): + loss = model(img, mask) + + optimizer.zero_grad() + if config.TRAIN.ACCUMULATION_STEPS > 1: + loss = loss / config.TRAIN.ACCUMULATION_STEPS + scaler.scale(loss).backward() + + if config.TRAIN.CLIP_GRAD: + grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), config.TRAIN.CLIP_GRAD) + else: + grad_norm = get_grad_norm(model.parameters()) + if (idx + 1) % config.TRAIN.ACCUMULATION_STEPS == 0: + # unscale the gradients back to their original scale, then take one optimizer step + scaler.step(optimizer) + + # update the scaler's scale factor for the next iteration + scaler.update() + + # optimizer.step() + # optimizer.zero_grad() + lr_scheduler.step_update(epoch * num_steps + idx) + else: + scaler.scale(loss).backward() + if config.TRAIN.CLIP_GRAD: + grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), config.TRAIN.CLIP_GRAD) + else: + grad_norm = get_grad_norm(model.parameters()) + scaler.step(optimizer) + scaler.update() + lr_scheduler.step_update(epoch * num_steps + idx) + + torch.cuda.synchronize() + + loss_meter.update(loss.item(), img.size(0)) + norm_meter.update(grad_norm) + batch_time.update(time.time() - end) + end = time.time() + + if idx % config.PRINT_FREQ == 0: + lr = optimizer.param_groups[0]['lr'] + memory_used =
torch.cuda.max_memory_allocated() / (1024.0 * 1024.0) + etas = batch_time.avg * (num_steps - idx) + logger.info( + f'Train: [{epoch}/{config.TRAIN.EPOCHS}][{idx}/{num_steps}]\t' + f'eta {datetime.timedelta(seconds=int(etas))} lr {lr:.6f}\t' + f'time {batch_time.val:.4f} ({batch_time.avg:.4f})\t' + f'loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t' + f'grad_norm {norm_meter.val:.4f} ({norm_meter.avg:.4f})\t' + f'mem {memory_used:.0f}MB') + writer.add_scalar('loss', loss_meter.val, global_step=epoch*len(data_loader)+idx) + + epoch_time = time.time() - start + logger.info(f"EPOCH {epoch} training takes {datetime.timedelta(seconds=int(epoch_time))}") + + # writer.add_scalar('loss', loss_meter.val, global_step=epoch) + + +if __name__ == '__main__': + + args, config = parse_option() + + if config.AMP_OPT_LEVEL != "O0": + assert amp is not None, "amp not installed!" + + if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: + rank = int(os.environ["RANK"]) + world_size = int(os.environ['WORLD_SIZE']) + print(f"RANK and WORLD_SIZE in environ: {rank}/{world_size}") + else: + rank = -1 + world_size = -1 + torch.cuda.set_device(config.LOCAL_RANK) + torch.distributed.init_process_group(backend='nccl', init_method='env://', world_size=world_size, rank=rank) + torch.distributed.barrier() + + seed = config.SEED + dist.get_rank() + torch.manual_seed(seed) + np.random.seed(seed) + cudnn.benchmark = True + + # linear scale the learning rate according to total batch size, may not be optimal + linear_scaled_lr = config.TRAIN.BASE_LR * config.DATA.BATCH_SIZE * dist.get_world_size() / 512.0 + linear_scaled_warmup_lr = config.TRAIN.WARMUP_LR * config.DATA.BATCH_SIZE * dist.get_world_size() / 512.0 + linear_scaled_min_lr = config.TRAIN.MIN_LR * config.DATA.BATCH_SIZE * dist.get_world_size() / 512.0 + # gradient accumulation also need to scale the learning rate + if config.TRAIN.ACCUMULATION_STEPS > 1: + linear_scaled_lr = linear_scaled_lr * config.TRAIN.ACCUMULATION_STEPS + linear_scaled_warmup_lr = linear_scaled_warmup_lr * config.TRAIN.ACCUMULATION_STEPS + linear_scaled_min_lr = linear_scaled_min_lr * config.TRAIN.ACCUMULATION_STEPS + config.defrost() + config.TRAIN.BASE_LR = linear_scaled_lr + config.TRAIN.WARMUP_LR = linear_scaled_warmup_lr + config.TRAIN.MIN_LR = linear_scaled_min_lr + config.freeze() + + os.makedirs(config.OUTPUT, exist_ok=True) + logger = create_logger(output_dir=config.OUTPUT, dist_rank=dist.get_rank(), name=f"{config.MODEL.NAME}") + + if dist.get_rank() == 0: + path = os.path.join(config.OUTPUT, "config.json") + with open(path, "w") as f: + f.write(config.dump()) + logger.info(f"Full config saved to {path}") + + # print config + logger.info(config.dump()) + + main(config, args) diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/models/__init__.py b/PuzzleTuning/Counterpart PreTrain Methods/simmim/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2d9c65e39f0fb592bd09ebd5eaba754c5a8f192e --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/models/__init__.py @@ -0,0 +1 @@ +from .build import build_model \ No newline at end of file diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/models/build.py b/PuzzleTuning/Counterpart PreTrain Methods/simmim/models/build.py new file mode 100644 index 0000000000000000000000000000000000000000..e3a2966d807a8eea75fa7984813582e24ee56f38 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/models/build.py @@ -0,0 +1,26 @@ +# 
-------------------------------------------------------- +# SimMIM +# Copyright (c) 2021 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Ze Liu +# Modified by Zhenda Xie +# -------------------------------------------------------- + +from .swin_transformer import build_swin +from .vision_transformer import build_vit, build_vit_mod +from .simmim import build_simmim + + +def build_model(config, is_pretrain=True, load_weight=None): + if is_pretrain: + model = build_simmim(config, load_weight) + else: + model_type = config.MODEL.TYPE + if model_type == 'swin': + model = build_swin(config) + elif model_type == 'vit': + model = build_vit_mod(config) + else: + raise NotImplementedError(f"Unknown fine-tune model: {model_type}") + + return model diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/models/simmim.py b/PuzzleTuning/Counterpart PreTrain Methods/simmim/models/simmim.py new file mode 100644 index 0000000000000000000000000000000000000000..ebf78542156de089e9ed447bea3cbb84dff78d6b --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/models/simmim.py @@ -0,0 +1,241 @@ +# -------------------------------------------------------- +# SimMIM +# Copyright (c) 2021 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Zhenda Xie +# -------------------------------------------------------- + +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F +from timm.models.layers import trunc_normal_ + +from .swin_transformer import SwinTransformer +# from .vision_transformer import VisionTransformer +# from timm.models.vision_transformer import VisionTransformer +from .vit_simple import VisionTransformer + + +class SwinTransformerForSimMIM(SwinTransformer): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + assert self.num_classes == 0 + + self.mask_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim)) + trunc_normal_(self.mask_token, mean=0., std=.02) + + def forward(self, x, mask): + x = self.patch_embed(x) + + assert mask is not None + B, L, _ = x.shape + + mask_tokens = self.mask_token.expand(B, L, -1) + w = mask.flatten(1).unsqueeze(-1).type_as(mask_tokens) + x = x * (1. 
- w) + mask_tokens * w + + if self.ape: + x = x + self.absolute_pos_embed + x = self.pos_drop(x) + + for layer in self.layers: + x = layer(x) + x = self.norm(x) + + x = x.transpose(1, 2) + B, C, L = x.shape + H = W = int(L ** 0.5) + x = x.reshape(B, C, H, W) + return x + + @torch.jit.ignore + def no_weight_decay(self): + return super().no_weight_decay() | {'mask_token'} + + +# class VisionTransformerForSimMIM(VisionTransformer): +# def __init__(self, **kwargs): +# super().__init__(**kwargs) + +# assert self.num_classes == 0 + +# self.mask_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim)) +# self._trunc_normal_(self.mask_token, std=.02) + +# def _trunc_normal_(self, tensor, mean=0., std=1.): +# trunc_normal_(tensor, mean=mean, std=std, a=-std, b=std) + +# def forward(self, x, mask): +# x = self.patch_embed(x) + +# assert mask is not None +# B, L, _ = x.shape + +# mask_token = self.mask_token.expand(B, L, -1) +# w = mask.flatten(1).unsqueeze(-1).type_as(mask_token) +# x = x * (1 - w) + mask_token * w + +# cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks +# x = torch.cat((cls_tokens, x), dim=1) + +# if self.pos_embed is not None: +# x = x + self.pos_embed +# x = self.pos_drop(x) + +# rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None +# for blk in self.blocks: +# x = blk(x, rel_pos_bias=rel_pos_bias) +# x = self.norm(x) + +# x = x[:, 1:] +# B, L, C = x.shape +# H = W = int(L ** 0.5) +# x = x.permute(0, 2, 1).reshape(B, C, H, W) +# return x + + +class VisionTransformerForSimMIM(VisionTransformer): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + assert self.num_classes == 0 + + self.mask_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim)) + self._trunc_normal_(self.mask_token, std=.02) + + def _trunc_normal_(self, tensor, mean=0., std=1.): + trunc_normal_(tensor, mean=mean, std=std, a=-std, b=std) + + def forward(self, x, mask): + x = self.patch_embed(x) + + assert mask is not None + B, L, _ = x.shape + + # Randomly mask some patches + mask_token = self.mask_token.expand(B, L, -1) + w = mask.flatten(1).unsqueeze(-1).type_as(mask_token) + x = x * (1 - w) + mask_token * w + + if self.pos_embed is not None: + x = self._pos_embed(x) + + x = self.blocks(x) + x = self.norm(x) + + x = x[:, 1:] + B, L, C = x.shape + H = W = int(L ** 0.5) + x = x.permute(0, 2, 1).reshape(B, C, H, W) + return x + + +class SimMIM(nn.Module): + def __init__(self, encoder, encoder_stride): + super().__init__() + self.encoder = encoder + self.encoder_stride = encoder_stride + + self.decoder = nn.Sequential( + nn.Conv2d( + in_channels=self.encoder.num_features, + out_channels=self.encoder_stride ** 2 * 3, kernel_size=1), + nn.PixelShuffle(self.encoder_stride), + ) + + self.in_chans = self.encoder.in_chans + self.patch_size = self.encoder.patch_size + + def forward(self, x, mask): + z = self.encoder(x, mask) + x_rec = self.decoder(z) + + mask = mask.repeat_interleave(self.patch_size, 1).repeat_interleave(self.patch_size, 2).unsqueeze(1).contiguous() + loss_recon = F.l1_loss(x, x_rec, reduction='none') + loss = (loss_recon * mask).sum() / (mask.sum() + 1e-5) / self.in_chans + return loss + + @torch.jit.ignore + def no_weight_decay(self): + if hasattr(self.encoder, 'no_weight_decay'): + return {'encoder.' + i for i in self.encoder.no_weight_decay()} + return {} + + @torch.jit.ignore + def no_weight_decay_keywords(self): + if hasattr(self.encoder, 'no_weight_decay_keywords'): + return {'encoder.' 
+ i for i in self.encoder.no_weight_decay_keywords()} + return {} + + +def build_simmim(config, load_weight=None): + model_type = config.MODEL.TYPE + if model_type == 'swin': + encoder = SwinTransformerForSimMIM( + img_size=config.DATA.IMG_SIZE, + patch_size=config.MODEL.SWIN.PATCH_SIZE, + in_chans=config.MODEL.SWIN.IN_CHANS, + num_classes=0, + embed_dim=config.MODEL.SWIN.EMBED_DIM, + depths=config.MODEL.SWIN.DEPTHS, + num_heads=config.MODEL.SWIN.NUM_HEADS, + window_size=config.MODEL.SWIN.WINDOW_SIZE, + mlp_ratio=config.MODEL.SWIN.MLP_RATIO, + qkv_bias=config.MODEL.SWIN.QKV_BIAS, + qk_scale=config.MODEL.SWIN.QK_SCALE, + drop_rate=config.MODEL.DROP_RATE, + drop_path_rate=config.MODEL.DROP_PATH_RATE, + ape=config.MODEL.SWIN.APE, + patch_norm=config.MODEL.SWIN.PATCH_NORM, + use_checkpoint=config.TRAIN.USE_CHECKPOINT) + encoder_stride = 32 + elif model_type == 'vit': + # encoder = VisionTransformerForSimMIM( + # img_size=config.DATA.IMG_SIZE, + # patch_size=config.MODEL.VIT.PATCH_SIZE, + # in_chans=config.MODEL.VIT.IN_CHANS, + # num_classes=0, + # embed_dim=config.MODEL.VIT.EMBED_DIM, + # depth=config.MODEL.VIT.DEPTH, + # num_heads=config.MODEL.VIT.NUM_HEADS, + # mlp_ratio=config.MODEL.VIT.MLP_RATIO, + # qkv_bias=config.MODEL.VIT.QKV_BIAS, + # drop_rate=config.MODEL.DROP_RATE, + # drop_path_rate=config.MODEL.DROP_PATH_RATE, + # norm_layer=partial(nn.LayerNorm, eps=1e-6), + # init_values=config.MODEL.VIT.INIT_VALUES, + # use_abs_pos_emb=config.MODEL.VIT.USE_APE, + # use_rel_pos_bias=config.MODEL.VIT.USE_RPB, + # use_shared_rel_pos_bias=config.MODEL.VIT.USE_SHARED_RPB, + # use_mean_pooling=config.MODEL.VIT.USE_MEAN_POOLING) + print('Ignoring the ViT config options; using hard-coded ViT-B/16 settings!') + encoder = VisionTransformerForSimMIM( + img_size=224, + patch_size=16, + in_chans=3, + num_classes=0, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4., + qkv_bias=True, + drop_rate=0., + drop_path_rate=0., + norm_layer=None) + encoder_stride = 16 + + # load pretrained weight + if load_weight: + model_weights = torch.load(load_weight) + encoder.load_state_dict(model_weights, strict=False) + print('loaded encoder from pretrained weight') + + else: + raise NotImplementedError(f"Unknown pre-train model: {model_type}") + + model = SimMIM(encoder=encoder, encoder_stride=encoder_stride) + + return model diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/models/swin_transformer.py b/PuzzleTuning/Counterpart PreTrain Methods/simmim/models/swin_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..56783400cad7bb32ec5b0f116f8f187bb9f91b67 --- /dev/null +++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/models/swin_transformer.py @@ -0,0 +1,612 @@ +# -------------------------------------------------------- +# SimMIM +# Copyright (c) 2021 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Ze Liu +# Modified by Zhenda Xie +# -------------------------------------------------------- + +import torch +import torch.nn as nn +import torch.utils.checkpoint as checkpoint +from timm.models.layers import DropPath, to_2tuple, trunc_normal_ + + +class Mlp(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x =
self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +def window_partition(x, window_size): + """ + Args: + x: (B, H, W, C) + window_size (int): window size + + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + return windows + + +def window_reverse(windows, window_size, H, W): + """ + Args: + windows: (num_windows*B, window_size, window_size, C) + window_size (int): Window size + H (int): Height of image + W (int): Width of image + + Returns: + x: (B, H, W, C) + """ + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + +class WindowAttention(nn.Module): + r""" Window based multi-head self attention (W-MSA) module with relative position bias. + It supports both of shifted and non-shifted window. + + Args: + dim (int): Number of input channels. + window_size (tuple[int]): The height and width of the window. + num_heads (int): Number of attention heads. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set + attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 + proj_drop (float, optional): Dropout ratio of output. Default: 0.0 + """ + + def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.): + + super().__init__() + self.dim = dim + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim ** -0.5 + + # define a parameter table of relative position bias + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(self.window_size[0]) + coords_w = torch.arange(self.window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + self.register_buffer("relative_position_index", relative_position_index) + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + trunc_normal_(self.relative_position_bias_table, std=.02) + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x, mask=None): + """ + Args: + x: input features with shape of (num_windows*B, N, C) + mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None + """ + B_, N, C = x.shape + qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = 
qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + def extra_repr(self) -> str: + return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}' + + def flops(self, N): + # calculate flops for 1 window with token length of N + flops = 0 + # qkv = self.qkv(x) + flops += N * self.dim * 3 * self.dim + # attn = (q @ k.transpose(-2, -1)) + flops += self.num_heads * N * (self.dim // self.num_heads) * N + # x = (attn @ v) + flops += self.num_heads * N * N * (self.dim // self.num_heads) + # x = self.proj(x) + flops += N * self.dim * self.dim + return flops + + +class SwinTransformerBlock(nn.Module): + r""" Swin Transformer Block. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resolution. + num_heads (int): Number of attention heads. + window_size (int): Window size. + shift_size (int): Shift size for SW-MSA. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float, optional): Stochastic depth rate. Default: 0.0 + act_layer (nn.Module, optional): Activation layer. Default: nn.GELU + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0, + mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0., + act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.num_heads = num_heads + self.window_size = window_size + self.shift_size = shift_size + self.mlp_ratio = mlp_ratio + if min(self.input_resolution) <= self.window_size: + # if window size is larger than input resolution, we don't partition windows + self.shift_size = 0 + self.window_size = min(self.input_resolution) + assert 0 <= self.shift_size < self.window_size, "shift_size must be in 0-window_size" + + self.norm1 = norm_layer(dim) + self.attn = WindowAttention( + dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, + qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) + + self.drop_path = DropPath(drop_path) if drop_path > 0.
else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + if self.shift_size > 0: + # calculate attention mask for SW-MSA + H, W = self.input_resolution + img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 + h_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + w_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 + mask_windows = mask_windows.view(-1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) + else: + attn_mask = None + + self.register_buffer("attn_mask", attn_mask) + + def forward(self, x): + H, W = self.input_resolution + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + + shortcut = x + x = self.norm1(x) + x = x.view(B, H, W, C) + + # cyclic shift + if self.shift_size > 0: + shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) + else: + shifted_x = x + + # partition windows + x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C + x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C + + # W-MSA/SW-MSA + attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) + shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C + + # reverse cyclic shift + if self.shift_size > 0: + x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) + else: + x = shifted_x + x = x.view(B, H * W, C) + + # FFN + x = shortcut + self.drop_path(x) + x = x + self.drop_path(self.mlp(self.norm2(x))) + + return x + + def extra_repr(self) -> str: + return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \ + f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}" + + def flops(self): + flops = 0 + H, W = self.input_resolution + # norm1 + flops += self.dim * H * W + # W-MSA/SW-MSA + nW = H * W / self.window_size / self.window_size + flops += nW * self.attn.flops(self.window_size * self.window_size) + # mlp + flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio + # norm2 + flops += self.dim * H * W + return flops + + +class PatchMerging(nn.Module): + r""" Patch Merging Layer. + + Args: + input_resolution (tuple[int]): Resolution of input feature. + dim (int): Number of input channels. + norm_layer (nn.Module, optional): Normalization layer. 
Default: nn.LayerNorm
+    """
+
+    def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
+        super().__init__()
+        self.input_resolution = input_resolution
+        self.dim = dim
+        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
+        self.norm = norm_layer(4 * dim)
+
+    def forward(self, x):
+        """
+        x: B, H*W, C
+        """
+        H, W = self.input_resolution
+        B, L, C = x.shape
+        assert L == H * W, "input feature has wrong size"
+        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) is not even."
+
+        x = x.view(B, H, W, C)
+
+        x0 = x[:, 0::2, 0::2, :]  # B H/2 W/2 C
+        x1 = x[:, 1::2, 0::2, :]  # B H/2 W/2 C
+        x2 = x[:, 0::2, 1::2, :]  # B H/2 W/2 C
+        x3 = x[:, 1::2, 1::2, :]  # B H/2 W/2 C
+        x = torch.cat([x0, x1, x2, x3], -1)  # B H/2 W/2 4*C
+        x = x.view(B, -1, 4 * C)  # B H/2*W/2 4*C
+
+        x = self.norm(x)
+        x = self.reduction(x)
+
+        return x
+
+    def extra_repr(self) -> str:
+        return f"input_resolution={self.input_resolution}, dim={self.dim}"
+
+    def flops(self):
+        H, W = self.input_resolution
+        flops = H * W * self.dim
+        flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
+        return flops
+
+
+class BasicLayer(nn.Module):
+    """ A basic Swin Transformer layer for one stage.
+
+    Args:
+        dim (int): Number of input channels.
+        input_resolution (tuple[int]): Input resolution.
+        depth (int): Number of blocks.
+        num_heads (int): Number of attention heads.
+        window_size (int): Local window size.
+        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
+        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
+        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
+        drop (float, optional): Dropout rate. Default: 0.0
+        attn_drop (float, optional): Attention dropout rate. Default: 0.0
+        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
+        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
+        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
+        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
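+
+        Note: the blocks built in __init__ below alternate between regular window attention
+        (W-MSA, shift_size=0 at even indices) and shifted window attention
+        (SW-MSA, shift_size=window_size // 2 at odd indices).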
+ """ + + def __init__(self, dim, input_resolution, depth, num_heads, window_size, + mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False): + + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.ModuleList([ + SwinTransformerBlock(dim=dim, input_resolution=input_resolution, + num_heads=num_heads, window_size=window_size, + shift_size=0 if (i % 2 == 0) else window_size // 2, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop, attn_drop=attn_drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, + norm_layer=norm_layer) + for i in range(depth)]) + + # patch merging layer + if downsample is not None: + self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer) + else: + self.downsample = None + + def forward(self, x): + for blk in self.blocks: + if self.use_checkpoint: + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x) + if self.downsample is not None: + x = self.downsample(x) + return x + + def extra_repr(self) -> str: + return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}" + + def flops(self): + flops = 0 + for blk in self.blocks: + flops += blk.flops() + if self.downsample is not None: + flops += self.downsample.flops() + return flops + + +class PatchEmbed(nn.Module): + r""" Image to Patch Embedding + + Args: + img_size (int): Image size. Default: 224. + patch_size (int): Patch token size. Default: 4. + in_chans (int): Number of input image channels. Default: 3. + embed_dim (int): Number of linear projection output channels. Default: 96. + norm_layer (nn.Module, optional): Normalization layer. Default: None + """ + + def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]] + self.img_size = img_size + self.patch_size = patch_size + self.patches_resolution = patches_resolution + self.num_patches = patches_resolution[0] * patches_resolution[1] + + self.in_chans = in_chans + self.embed_dim = embed_dim + + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + if norm_layer is not None: + self.norm = norm_layer(embed_dim) + else: + self.norm = None + + def forward(self, x): + B, C, H, W = x.shape + # FIXME look at relaxing size constraints + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." + x = self.proj(x).flatten(2).transpose(1, 2) # B Ph*Pw C + if self.norm is not None: + x = self.norm(x) + return x + + def flops(self): + Ho, Wo = self.patches_resolution + flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1]) + if self.norm is not None: + flops += Ho * Wo * self.embed_dim + return flops + + +class SwinTransformer(nn.Module): + r""" Swin Transformer + A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - + https://arxiv.org/pdf/2103.14030 + + Args: + img_size (int | tuple(int)): Input image size. Default 224 + patch_size (int | tuple(int)): Patch size. Default: 4 + in_chans (int): Number of input image channels. 
Default: 3 + num_classes (int): Number of classes for classification head. Default: 1000 + embed_dim (int): Patch embedding dimension. Default: 96 + depths (tuple(int)): Depth of each Swin Transformer layer. + num_heads (tuple(int)): Number of attention heads in different layers. + window_size (int): Window size. Default: 7 + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4 + qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None + drop_rate (float): Dropout rate. Default: 0 + attn_drop_rate (float): Attention dropout rate. Default: 0 + drop_path_rate (float): Stochastic depth rate. Default: 0.1 + norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. + ape (bool): If True, add absolute position embedding to the patch embedding. Default: False + patch_norm (bool): If True, add normalization after patch embedding. Default: True + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False + """ + + def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000, + embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], + window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, + norm_layer=nn.LayerNorm, ape=False, patch_norm=True, + use_checkpoint=False, **kwargs): + super().__init__() + + self.img_size = img_size + self.patch_size = patch_size + self.in_chans = in_chans + + self.num_classes = num_classes + self.num_layers = len(depths) + self.embed_dim = embed_dim + self.ape = ape + self.patch_norm = patch_norm + self.num_features = int(embed_dim * 2 ** (self.num_layers - 1)) + self.mlp_ratio = mlp_ratio + + # split image into non-overlapping patches + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, + norm_layer=norm_layer if self.patch_norm else None) + num_patches = self.patch_embed.num_patches + patches_resolution = self.patch_embed.patches_resolution + self.patches_resolution = patches_resolution + + # absolute position embedding + if self.ape: + self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) + trunc_normal_(self.absolute_pos_embed, std=.02) + + self.pos_drop = nn.Dropout(p=drop_rate) + + # stochastic depth + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule + + # build layers + self.layers = nn.ModuleList() + for i_layer in range(self.num_layers): + layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer), + input_resolution=(patches_resolution[0] // (2 ** i_layer), + patches_resolution[1] // (2 ** i_layer)), + depth=depths[i_layer], + num_heads=num_heads[i_layer], + window_size=window_size, + mlp_ratio=self.mlp_ratio, + qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, + drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], + norm_layer=norm_layer, + downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, + use_checkpoint=use_checkpoint) + self.layers.append(layer) + + self.norm = norm_layer(self.num_features) + self.avgpool = nn.AdaptiveAvgPool1d(1) + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + 
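+                # zero-init the bias; the weight above is drawn from a truncated normal (std=.02)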
nn.init.constant_(m.bias, 0)
+        elif isinstance(m, nn.LayerNorm):
+            nn.init.constant_(m.bias, 0)
+            nn.init.constant_(m.weight, 1.0)
+
+    @torch.jit.ignore
+    def no_weight_decay(self):
+        return {'absolute_pos_embed'}
+
+    @torch.jit.ignore
+    def no_weight_decay_keywords(self):
+        return {'relative_position_bias_table'}
+
+    def forward_features(self, x):
+        x = self.patch_embed(x)
+        if self.ape:
+            x = x + self.absolute_pos_embed
+        x = self.pos_drop(x)
+
+        for layer in self.layers:
+            x = layer(x)
+
+        x = self.norm(x)  # B L C
+        x = self.avgpool(x.transpose(1, 2))  # B C 1
+        x = torch.flatten(x, 1)
+        return x
+
+    def forward(self, x):
+        x = self.forward_features(x)
+        x = self.head(x)
+        return x
+
+    def flops(self):
+        flops = 0
+        flops += self.patch_embed.flops()
+        for i, layer in enumerate(self.layers):
+            flops += layer.flops()
+        flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
+        flops += self.num_features * self.num_classes
+        return flops
+
+
+def build_swin(config):
+    model = SwinTransformer(
+        img_size=config.DATA.IMG_SIZE,
+        patch_size=config.MODEL.SWIN.PATCH_SIZE,
+        in_chans=config.MODEL.SWIN.IN_CHANS,
+        num_classes=config.MODEL.NUM_CLASSES,
+        embed_dim=config.MODEL.SWIN.EMBED_DIM,
+        depths=config.MODEL.SWIN.DEPTHS,
+        num_heads=config.MODEL.SWIN.NUM_HEADS,
+        window_size=config.MODEL.SWIN.WINDOW_SIZE,
+        mlp_ratio=config.MODEL.SWIN.MLP_RATIO,
+        qkv_bias=config.MODEL.SWIN.QKV_BIAS,
+        qk_scale=config.MODEL.SWIN.QK_SCALE,
+        drop_rate=config.MODEL.DROP_RATE,
+        drop_path_rate=config.MODEL.DROP_PATH_RATE,
+        ape=config.MODEL.SWIN.APE,
+        patch_norm=config.MODEL.SWIN.PATCH_NORM,
+        use_checkpoint=config.TRAIN.USE_CHECKPOINT)
+
+    return model
\ No newline at end of file
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/models/vision_transformer.py b/PuzzleTuning/Counterpart PreTrain Methods/simmim/models/vision_transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..c161245c2485eec92e1c1ed3af2ac6c5b5c6c166
--- /dev/null
+++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/models/vision_transformer.py
@@ -0,0 +1,377 @@
+# --------------------------------------------------------
+# SimMIM
+# Copyright (c) 2021 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+# Based on BEIT code bases (https://github.com/microsoft/unilm/tree/master/beit)
+# Written by Yutong Lin, Zhenda Xie
+# --------------------------------------------------------
+
+import math
+from functools import partial
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from timm.models.layers import DropPath, to_2tuple, trunc_normal_
+
+
+class Mlp(nn.Module):
+    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
+        super().__init__()
+        out_features = out_features or in_features
+        hidden_features = hidden_features or in_features
+        self.fc1 = nn.Linear(in_features, hidden_features)
+        self.act = act_layer()
+        self.fc2 = nn.Linear(hidden_features, out_features)
+        self.drop = nn.Dropout(drop)
+
+    def forward(self, x):
+        x = self.fc1(x)
+        x = self.act(x)
+        # x = self.drop(x)
+        # the first dropout is commented out to follow the original BERT implementation
+        x = self.fc2(x)
+        x = self.drop(x)
+        return x
+
+
+class Attention(nn.Module):
+    def __init__(
+            self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.,
+            proj_drop=0., window_size=None, attn_head_dim=None):
+        super().__init__()
+        self.num_heads = num_heads
+        head_dim = dim // num_heads
+        if attn_head_dim is not None:
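+            # optionally override the per-head dimension; all_head_dim below may then differ from dim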
head_dim = attn_head_dim + all_head_dim = head_dim * self.num_heads + self.scale = qk_scale or head_dim ** -0.5 + + self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False) + if qkv_bias: + self.q_bias = nn.Parameter(torch.zeros(all_head_dim)) + self.v_bias = nn.Parameter(torch.zeros(all_head_dim)) + else: + self.q_bias = None + self.v_bias = None + + if window_size: + self.window_size = window_size + # cls to token & token to cls & cls to cls + self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 + self.relative_position_bias_table = nn.Parameter( + torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(window_size[0]) + coords_w = torch.arange(window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * window_size[1] - 1 + relative_position_index = \ + torch.zeros(size=(window_size[0] * window_size[1] + 1, ) * 2, dtype=relative_coords.dtype) + relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + relative_position_index[0, 0:] = self.num_relative_distance - 3 + relative_position_index[0:, 0] = self.num_relative_distance - 2 + relative_position_index[0, 0] = self.num_relative_distance - 1 + + self.register_buffer("relative_position_index", relative_position_index) + else: + self.window_size = None + self.relative_position_bias_table = None + self.relative_position_index = None + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(all_head_dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x, rel_pos_bias=None): + B, N, C = x.shape + qkv_bias = None + if self.q_bias is not None: + qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias)) + qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) + qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + if self.relative_position_bias_table is not None: + relative_position_bias = \ + self.relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1] + 1, + self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if rel_pos_bias is not None: + attn = attn + rel_pos_bias + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, -1) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm, + window_size=None, attn_head_dim=None): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, num_heads=num_heads, 
qkv_bias=qkv_bias, qk_scale=qk_scale,
+            attn_drop=attn_drop, proj_drop=drop, window_size=window_size, attn_head_dim=attn_head_dim)
+        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+        self.norm2 = norm_layer(dim)
+        mlp_hidden_dim = int(dim * mlp_ratio)
+        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
+
+        if init_values is not None:
+            self.gamma_1 = nn.Parameter(init_values * torch.ones(dim), requires_grad=True)
+            self.gamma_2 = nn.Parameter(init_values * torch.ones(dim), requires_grad=True)
+        else:
+            self.gamma_1, self.gamma_2 = None, None
+
+    def forward(self, x, rel_pos_bias=None):
+        if self.gamma_1 is None:
+            x = x + self.drop_path(self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias))
+            x = x + self.drop_path(self.mlp(self.norm2(x)))
+        else:
+            x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias))
+            x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
+        return x
+
+
+class PatchEmbed(nn.Module):
+    """ Image to Patch Embedding
+    """
+    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
+        super().__init__()
+        img_size = to_2tuple(img_size)
+        patch_size = to_2tuple(patch_size)
+        num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
+        self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
+        self.img_size = img_size
+        self.patch_size = patch_size
+        self.num_patches = num_patches
+
+        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
+
+    def forward(self, x, **kwargs):
+        B, C, H, W = x.shape
+        # FIXME look at relaxing size constraints
+        assert H == self.img_size[0] and W == self.img_size[1], \
+            f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
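+        # Illustrative shape flow, assuming the defaults (img_size=224, patch_size=16, embed_dim=768):
+        #   proj:      [B, 3, 224, 224] -> [B, 768, 14, 14]
+        #   flatten:   [B, 768, 14, 14] -> [B, 768, 196]
+        #   transpose: [B, 768, 196]    -> [B, 196, 768]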
+ x = self.proj(x).flatten(2).transpose(1, 2) + return x + + +class RelativePositionBias(nn.Module): + + def __init__(self, window_size, num_heads): + super().__init__() + self.window_size = window_size + self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 + self.relative_position_bias_table = nn.Parameter( + torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH + # cls to token & token 2 cls & cls to cls + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(window_size[0]) + coords_w = torch.arange(window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * window_size[1] - 1 + relative_position_index = \ + torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype) + relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + relative_position_index[0, 0:] = self.num_relative_distance - 3 + relative_position_index[0:, 0] = self.num_relative_distance - 2 + relative_position_index[0, 0] = self.num_relative_distance - 1 + + self.register_buffer("relative_position_index", relative_position_index) + + def forward(self): + relative_position_bias = \ + self.relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1] + 1, + self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH + return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + + +class VisionTransformer(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., + drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None, + use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, + use_mean_pooling=True, init_scale=0.001): + super().__init__() + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim + self.patch_size = patch_size + self.in_chans = in_chans + + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + num_patches = self.patch_embed.num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + if use_abs_pos_emb: + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) + else: + self.pos_embed = None + self.pos_drop = nn.Dropout(p=drop_rate) + + if use_shared_rel_pos_bias: + self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads) + else: + self.rel_pos_bias = None + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.use_rel_pos_bias = use_rel_pos_bias + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + init_values=init_values, 
window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None) + for i in range(depth)]) + self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim) + self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None + self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + if self.pos_embed is not None: + self._trunc_normal_(self.pos_embed, std=.02) + self._trunc_normal_(self.cls_token, std=.02) + if num_classes > 0: + self._trunc_normal_(self.head.weight, std=.02) + self.apply(self._init_weights) + self.fix_init_weight() + + if num_classes > 0: + self.head.weight.data.mul_(init_scale) + self.head.bias.data.mul_(init_scale) + + def _trunc_normal_(self, tensor, mean=0., std=1.): + trunc_normal_(tensor, mean=mean, std=std) + + def fix_init_weight(self): + def rescale(param, layer_id): + param.div_(math.sqrt(2.0 * layer_id)) + + for layer_id, layer in enumerate(self.blocks): + rescale(layer.attn.proj.weight.data, layer_id + 1) + rescale(layer.mlp.fc2.weight.data, layer_id + 1) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + self._trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d): + self._trunc_normal_(m.weight, std=.02) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def get_num_layers(self): + return len(self.blocks) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + batch_size, seq_len, _ = x.size() + + cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks + x = torch.cat((cls_tokens, x), dim=1) + if self.pos_embed is not None: + x = x + self.pos_embed + x = self.pos_drop(x) + + rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None + for blk in self.blocks: + x = blk(x, rel_pos_bias=rel_pos_bias) + + x = self.norm(x) + if self.fc_norm is not None: + t = x[:, 1:, :] + return self.fc_norm(t.mean(1)) + else: + return x[:, 0] + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def build_vit(config): + model = VisionTransformer( + img_size=config.DATA.IMG_SIZE, + patch_size=config.MODEL.VIT.PATCH_SIZE, + in_chans=config.MODEL.VIT.IN_CHANS, + num_classes=config.MODEL.NUM_CLASSES, + embed_dim=config.MODEL.VIT.EMBED_DIM, + depth=config.MODEL.VIT.DEPTH, + num_heads=config.MODEL.VIT.NUM_HEADS, + mlp_ratio=config.MODEL.VIT.MLP_RATIO, + qkv_bias=config.MODEL.VIT.QKV_BIAS, + drop_rate=config.MODEL.DROP_RATE, + drop_path_rate=config.MODEL.DROP_PATH_RATE, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + init_values=config.MODEL.VIT.INIT_VALUES, + use_abs_pos_emb=config.MODEL.VIT.USE_APE, + use_rel_pos_bias=config.MODEL.VIT.USE_RPB, + use_shared_rel_pos_bias=config.MODEL.VIT.USE_SHARED_RPB, + use_mean_pooling=config.MODEL.VIT.USE_MEAN_POOLING) + + return model + +def build_vit_mod(config): + model = VisionTransformer( + img_size=224, + patch_size=16, + in_chans=3, + num_classes=1000, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4., + qkv_bias=True, + drop_rate=0., + 
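+        # NOTE: the config argument is ignored here; ViT-Base/16 hyper-parameters are hardcoded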
drop_path_rate=0.,
+        norm_layer=partial(nn.LayerNorm, eps=1e-6),
+        init_values=None,
+        use_abs_pos_emb=True,
+        use_rel_pos_bias=False,
+        use_shared_rel_pos_bias=False,
+        use_mean_pooling=False)
+
+    return model
\ No newline at end of file
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/models/vit_simple.py b/PuzzleTuning/Counterpart PreTrain Methods/simmim/models/vit_simple.py
new file mode 100644
index 0000000000000000000000000000000000000000..61b11eb7f5299e4431a41daf43d29049133c6af9
--- /dev/null
+++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/models/vit_simple.py
@@ -0,0 +1,360 @@
+"""
+This file defines the ViT model used for SimMIM pre-training.
+"""
+
+import torch
+import torch.nn as nn
+from functools import partial
+from timm.models.layers import DropPath, to_2tuple
+
+
+class PatchEmbed(nn.Module):
+    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
+        """Image to patch embedding
+        Input dimension: [B, c, h, w]
+
+        Notes:
+            B : Batches
+            c : Input channels
+            h : Input heights
+            w : Input widths
+            D : Patch dimension
+
+        Args:
+            img_size (int, optional): Width and height of the input image. Defaults to 224.
+            patch_size (int, optional): Width and height of the patch. Defaults to 16.
+            in_chans (int, optional): Input image channel. Defaults to 3.
+            embed_dim (int, optional): Patch embedding dimension. Defaults to 768.
+        """
+        super().__init__()
+
+        img_size = to_2tuple(img_size)  # (img_size, img_size)
+        patch_size = to_2tuple(patch_size)  # (patch_size, patch_size)
+
+        self.num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
+        self.img_size = img_size
+        self.patch_size = patch_size
+
+        # Use CNN to split patches
+        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
+
+    def forward(self, x):
+        """Divide the image into patches
+
+        Args:
+            x : [B, c, h, w]
+
+        Returns:
+            x : [B, h//patch_size * w//patch_size, D]
+        """
+        # Divide the input image into patches
+        # [B, c, h, w] -> [B, D, h//patch_size, w//patch_size]
+        # eg: [10, 3, 224, 224] -> [10, 768, 14, 14]
+        x = self.proj(x)
+
+        # Flatten
+        # [B, D, h//patch_size, w//patch_size] -> [B, D, h//patch_size*w//patch_size]
+        # eg: [10, 768, 14, 14] -> [10, 768, 196]
+        x = x.flatten(2)
+
+        # Transpose
+        # [B, D, h//patch_size*w//patch_size] -> [B, h//patch_size*w//patch_size, D]
+        # eg: [10, 768, 196] -> [10, 196, 768]
+        x = x.transpose(1, 2)
+        return x
+
+
+class FFN(nn.Module):
+    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
+        """Feed forward network (with one hidden layer)
+
+        Args:
+            in_features: Number of input features
+            hidden_features (optional): Number of features in the hidden layer. Defaults to None.
+            out_features (optional): Number of output features. Defaults to None.
+            act_layer (optional): The activation function. Defaults to nn.GELU.
+            drop (optional): Dropout probability. Defaults to 0..
+        """
+        super().__init__()
+        out_features = out_features or in_features
+        hidden_features = hidden_features or in_features
+        self.fc1 = nn.Linear(in_features, hidden_features)
+        self.act = act_layer()
+        self.fc2 = nn.Linear(hidden_features, out_features)
+        self.drop = nn.Dropout(drop)
+
+    def forward(self, x):
+        x = self.fc1(x)
+        x = self.act(x)
+        x = self.drop(x)
+        x = self.fc2(x)
+        x = self.drop(x)
+        return x
+
+
+class Attention(nn.Module):
+    def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
+        """Multi-head self attention
+
+        Args:
+            dim : Patch dimension
+            num_heads (optional): Number of heads.
Defaults to 8.
+            qkv_bias (bool, optional): Whether to add a learnable bias to the qkv projection. Defaults to False.
+            attn_drop (optional): Dropout on the attention weights, i.e. the output of softmax(q @ k^T). Defaults to 0..
+            proj_drop (optional): Dropout after the final output projection. Defaults to 0..
+        """
+        super().__init__()
+        assert dim % num_heads == 0, 'dim should be divisible by num_heads'
+
+        self.num_heads = num_heads
+        head_dim = dim // num_heads
+        self.scale = head_dim ** -0.5
+
+        # A single linear layer produces q, k and v from the input for self attention
+        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
+        self.attn_drop = nn.Dropout(attn_drop)
+        self.proj = nn.Linear(dim, dim)
+        self.proj_drop = nn.Dropout(proj_drop)
+
+    def forward(self, x):
+        """Forward function
+
+        Notes:
+            B: Batch
+            N: Patch number
+            D: Patch dimension (embedding dimension) (should be divisible by H)
+            H: Number of heads
+
+        Args:
+            x : [B, N, D]
+
+        Returns:
+            x : [B, N, D]
+        """
+
+        B, N, D = x.shape
+
+        # Generate q, k and v with a single shared linear layer
+        # [B, N, D] -> [B, N, 3D] -> [B, N, 3, H, D/H] -> [3, B, H, N, D/H]
+        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, D // self.num_heads).permute(2, 0, 3, 1, 4)
+        q, k, v = qkv[0], qkv[1], qkv[2]  # [B, H, N, D/H]
+
+        # Build the attention (correlation) matrix from the scaled product q @ k^T
+        # [B, H, N, D/H] @ [B, H, D/H, N] -> [B, H, N, N] -> normalize
+        attn = (q @ k.transpose(-2, -1)) * self.scale
+        attn = attn.softmax(dim=-1)
+        attn = self.attn_drop(attn)  # no-op with the default rate of 0.
+
+        # Weighted sum
+        # [B, H, N, N] @ [B, H, N, D/H] -> [B, H, N, D/H] -> aggregate to [B, N, D]
+        x = (attn @ v).transpose(1, 2).reshape(B, N, D)
+
+        # Output projection that mixes information across heads
+        x = self.proj(x)
+        x = self.proj_drop(x)  # no-op with the default rate of 0.
+
+        # x: [B, N, D]
+        return x
+
+
+class Block(nn.Module):
+    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
+                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
+        """Transformer encoder block
+
+        Args:
+            dim : Patch dimension.
+            num_heads : Number of heads in the attention module.
+            mlp_ratio (optional): Hidden layer dimension, as a multiple of the patch dimension. Defaults to 4..
+            qkv_bias (bool, optional): Whether to add a learnable bias to the qkv projection. Defaults to False.
+            drop (optional): Dropout for the output projection in the attention module. Defaults to 0..
+            attn_drop (optional): Dropout on the attention weights in the attention module. Defaults to 0..
+            drop_path (optional): Drop path rate. Defaults to 0..
+            act_layer (optional): Activation layer. Defaults to nn.GELU.
+            norm_layer (optional): Normalization layer. Defaults to nn.LayerNorm.
+        """
+        super().__init__()
+        self.norm1 = norm_layer(dim)
+        self.attn = Attention(
+            dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
+
+        # Use drop path if selected
+        self.drop_path = DropPath(drop_path) if drop_path > 0.
else nn.Identity()
+        self.norm2 = norm_layer(dim)
+        mlp_hidden_dim = int(dim * mlp_ratio)
+        self.mlp = FFN(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
+
+    def forward(self, x):
+        """The block structure:
+
+            x
+            |---------------
+            Norm1          |
+            Attention      | (skip connection)
+            Drop path      |
+            | <-------------
+            |---------------
+            Norm2          |
+            FFN            | (skip connection)
+            Drop path      |
+            |<--------------
+            x
+
+        Args:
+            x : [B, N, D]
+
+        Returns:
+            x : [B, N, D]
+        """
+        x = x + self.drop_path(self.attn(self.norm1(x)))  # skip connection
+        x = x + self.drop_path(self.mlp(self.norm2(x)))
+        return x
+
+
+class VisionTransformer(nn.Module):
+    def __init__(
+            self,
+            img_size=224,
+            patch_size=16,
+            in_chans=3,
+            num_classes=1000,
+            embed_dim=768,
+            depth=12,
+            num_heads=12,
+            mlp_ratio=4.,
+            qkv_bias=True,
+            pre_norm=False,
+            drop_rate=0.,
+            attn_drop_rate=0.,
+            drop_path_rate=0.,
+            embed_layer=PatchEmbed,
+            norm_layer=None,
+            act_layer=None,
+            block_fn=Block,
+    ):
+        """Vision Transformer (SimMIM compat version)
+
+        A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` -
+            https://arxiv.org/abs/2010.11929
+
+        Ref: https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/vision_transformer.py
+
+        Args:
+            img_size (int, tuple, optional): Input image size. Defaults to 224.
+            patch_size (int, tuple, optional): Patch size. Defaults to 16.
+            in_chans (int, optional): Number of input channels. Defaults to 3.
+            num_classes (int, optional): Number of classes for classification head. Defaults to 1000.
+            embed_dim (int, optional): Embedding dimension (patch dimension). Defaults to 768.
+            depth (int, optional): Depth of transformer encoder blocks. Defaults to 12.
+            num_heads (int, optional): Number of attention heads. Defaults to 12.
+            mlp_ratio (float, optional): Ratio of mlp hidden dim to embedding dim. Defaults to 4..
+            qkv_bias (bool, optional): Enable bias for qkv if True. Defaults to True.
+            pre_norm (bool, optional): Whether to normalize before the encoder blocks. Defaults to False.
+            drop_rate (float, optional): Dropout rate in the attention module. Defaults to 0..
+            attn_drop_rate (float, optional): Attention dropout rate in the attention module. Defaults to 0..
+            drop_path_rate (float, optional): Stochastic depth rate. Defaults to 0..
+            embed_layer (nn.Module, optional): Patch embedding layer. Defaults to PatchEmbed.
+            norm_layer (nn.Module, optional): Customized normalization layer. Defaults to None.
+            act_layer (nn.Module, optional): Customized MLP activation layer. Defaults to None.
+            block_fn (nn.Module, optional): Encoder block. Defaults to Block.
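+
+        Example (illustrative sketch, assuming the defaults above):
+            model = VisionTransformer(img_size=224, patch_size=16, embed_dim=768, depth=12, num_heads=12)
+            feats = model.forward_features(torch.randn(2, 3, 224, 224))  # [2, 197, 768]
+            out = model(torch.randn(2, 3, 224, 224))  # [2, 768] class-token features (head removed for SimMIM)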
+        """
+        super().__init__()
+
+        # Setup normalization and activation function
+        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)  # Layer normalization with default eps=1e-6
+        act_layer = act_layer or nn.GELU
+
+        self.num_classes = num_classes
+        self.embed_dim = embed_dim
+
+        # Added for SimMIM compatibility
+        self.num_features = embed_dim
+        self.in_chans = in_chans
+        self.patch_size = patch_size
+
+        # Define the patch embedding
+        self.patch_embed = embed_layer(
+            img_size=img_size,
+            patch_size=patch_size,
+            in_chans=in_chans,
+            embed_dim=embed_dim
+        )
+
+        # Define the class token
+        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
+
+        # Define the positional embedding
+        embed_len = self.patch_embed.num_patches + 1
+        self.pos_embed = nn.Parameter(torch.randn(1, embed_len, embed_dim) * .02)
+        self.pos_drop = nn.Dropout(p=drop_rate)
+        self.norm_pre = norm_layer(embed_dim) if pre_norm else nn.Identity()
+
+        # Define the stochastic depth decay rule based on drop path
+        # As the depth increases, the drop path rate increases, and finally reaches drop_path_rate
+        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
+
+        # Define the encoder blocks
+        self.blocks = nn.Sequential(*[
+            block_fn(
+                dim=embed_dim,
+                num_heads=num_heads,
+                mlp_ratio=mlp_ratio,
+                qkv_bias=qkv_bias,
+                drop=drop_rate,
+                attn_drop=attn_drop_rate,
+                drop_path=dpr[i],
+                norm_layer=norm_layer,
+                act_layer=act_layer
+            )
+            for i in range(depth)])
+        self.norm = norm_layer(embed_dim)
+
+        # # Classifier Head
+        # # Head is removed because not used in SimMIM
+        # self.head = nn.Linear(self.embed_dim, num_classes)
+
+    def _pos_embed(self, x):
+        # Concat the class token
+        x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
+
+        # Add the learnable positional embedding
+        x = x + self.pos_embed
+
+        # Dropout
+        x = self.pos_drop(x)
+        return x
+
+    def forward_features(self, x):
+        # Patch embedding [B, c, h, w] -> [B, N-1, D] (define N = num_patches + 1)
+        x = self.patch_embed(x)
+
+        # Positional embedding [B, N-1, D] -> [B, N, D]
+        x = self._pos_embed(x)
+
+        # Pre-normalize (identity unless pre_norm=True) [B, N, D]
+        x = self.norm_pre(x)
+
+        # Transformer encoder blocks [B, N, D]
+        x = self.blocks(x)
+
+        # Final normalization (applied by default) [B, N, D]
+        x = self.norm(x)
+        return x
+
+    def forward_head(self, x):
+        # Fetch the class token [B, N, D] -> [B, D]
+        x = x[:, 0]
+
+        # The classification head is removed for SimMIM, so return the class token directly
+        # x = self.head(x)
+        return x
+
+    def forward(self, x):
+        # The main part of the ViT
+        x = self.forward_features(x)
+
+        # Fetch result based on the class token
+        x = self.forward_head(x)
+        return x
+
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/nohup.out b/PuzzleTuning/Counterpart PreTrain Methods/simmim/nohup.out
new file mode 100644
index 0000000000000000000000000000000000000000..0e0cef66623b5af24e49c3a16852ce14210decad
--- /dev/null
+++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/nohup.out
@@ -0,0 +1,3428 @@
+/root/miniconda3/lib/python3.8/site-packages/torch/distributed/launch.py:181: FutureWarning: The module torch.distributed.launch is deprecated
+and will be removed in future. Use torchrun.
+Note that --use-env is set by default in torchrun.
+If your script expects `--local-rank` argument to be set, please
+change it to read from `os.environ['LOCAL_RANK']` instead.
See +https://pytorch.org/docs/stable/distributed.html#launch-utility for +further instructions + + warnings.warn( +=> merge config from ./configs/vit_base__test/simmim_pretrain__vit_base__img224__100ep.yaml +RANK and WORLD_SIZE in environ: 2/4 +=> merge config from ./configs/vit_base__test/simmim_pretrain__vit_base__img224__100ep.yaml +RANK and WORLD_SIZE in environ: 3/4 +=> merge config from ./configs/vit_base__test/simmim_pretrain__vit_base__img224__100ep.yaml +RANK and WORLD_SIZE in environ: 0/4 +=> merge config from ./configs/vit_base__test/simmim_pretrain__vit_base__img224__100ep.yaml +RANK and WORLD_SIZE in environ: 1/4 +[2023-10-10 03:03:53 simmim_pretrain](main_simmim.py 278): INFO Full config saved to /root/autodl-tmp/LSQ-simmim/checkpoint/simmim_pretrain/vit_simmim/config.json +[2023-10-10 03:03:53 simmim_pretrain](main_simmim.py 281): INFO AMP_OPT_LEVEL: O1 +AUG: + AUTO_AUGMENT: rand-m9-mstd0.5-inc1 + COLOR_JITTER: 0.4 + CUTMIX: 1.0 + CUTMIX_MINMAX: null + MIXUP: 0.8 + MIXUP_MODE: batch + MIXUP_PROB: 1.0 + MIXUP_SWITCH_PROB: 0.5 + RECOUNT: 1 + REMODE: pixel + REPROB: 0.25 +BASE: +- '' +DATA: + BATCH_SIZE: 128 + DATASET: imagenet + DATA_PATH: /root/autodl-tmp/datasets/All + IMG_SIZE: 224 + INTERPOLATION: bicubic + MASK_PATCH_SIZE: 32 + MASK_RATIO: 0.6 + NUM_WORKERS: 8 + PIN_MEMORY: true +EVAL_MODE: false +LOCAL_RANK: 0 +MODEL: + DROP_PATH_RATE: 0.0 + DROP_RATE: 0.0 + LABEL_SMOOTHING: 0.1 + NAME: simmim_pretrain + NUM_CLASSES: 1000 + RESUME: '' + SWIN: + APE: false + DEPTHS: + - 2 + - 2 + - 6 + - 2 + EMBED_DIM: 96 + IN_CHANS: 3 + MLP_RATIO: 4.0 + NUM_HEADS: + - 3 + - 6 + - 12 + - 24 + PATCH_NORM: true + PATCH_SIZE: 4 + QKV_BIAS: true + QK_SCALE: null + WINDOW_SIZE: 7 + TYPE: vit + VIT: + DEPTH: 12 + EMBED_DIM: 768 + INIT_VALUES: null + IN_CHANS: 3 + MLP_RATIO: 4 + NUM_HEADS: 12 + PATCH_SIZE: 16 + QKV_BIAS: true + USE_APE: true + USE_MEAN_POOLING: false + USE_RPB: false + USE_SHARED_RPB: true +OUTPUT: /root/autodl-tmp/LSQ-simmim/checkpoint/simmim_pretrain/vit_simmim +PRETRAINED: '' +PRINT_FREQ: 500 +SAVE_FREQ: 20 +SEED: 0 +TAG: vit_simmim +TEST: + CROP: true +THROUGHPUT_MODE: false +TRAIN: + ACCUMULATION_STEPS: 0 + AUTO_RESUME: true + BASE_LR: 0.0002 + CLIP_GRAD: 5.0 + EPOCHS: 200 + LAYER_DECAY: 1.0 + LR_SCHEDULER: + DECAY_EPOCHS: 30 + DECAY_RATE: 0.1 + GAMMA: 0.1 + MULTISTEPS: + - 700 + NAME: multistep + MIN_LR: 5.0e-06 + OPTIMIZER: + BETAS: + - 0.9 + - 0.999 + EPS: 1.0e-08 + MOMENTUM: 0.9 + NAME: adamw + START_EPOCH: 0 + USE_CHECKPOINT: false + WARMUP_EPOCHS: 20 + WARMUP_LR: 1.0e-06 + WEIGHT_DECAY: 0.05 + +[2023-10-10 03:03:53 simmim_pretrain](data_simmim.py 96): INFO Pre-train data transform: + +[2023-10-10 03:04:01 simmim_pretrain](data_simmim.py 99): INFO Build dataset: train images = 3475344 +[2023-10-10 03:04:01 simmim_pretrain](main_simmim.py 103): INFO Creating model:vit/simmim_pretrain +Ignored all config about ViT! +Ignored all config about ViT! +Ignored all config about ViT! +Ignored all config about ViT! 
+loaded from pretrained weight +loaded from pretrained weight +loaded from pretrained weight +loaded from pretrained weight +[2023-10-10 03:04:02 simmim_pretrain](main_simmim.py 106): INFO SimMIM( + (encoder): VisionTransformerForSimMIM( + (patch_embed): PatchEmbed( + (proj): Conv2d(3, 768, kernel_size=(16, 16), stride=(16, 16)) + ) + (pos_drop): Dropout(p=0.0, inplace=False) + (norm_pre): Identity() + (blocks): Sequential( + (0): Block( + (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True) + (attn): Attention( + (qkv): Linear(in_features=768, out_features=2304, bias=True) + (attn_drop): Dropout(p=0.0, inplace=False) + (proj): Linear(in_features=768, out_features=768, bias=True) + (proj_drop): Dropout(p=0.0, inplace=False) + ) + (drop_path): Identity() + (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True) + (mlp): FFN( + (fc1): Linear(in_features=768, out_features=3072, bias=True) + (act): GELU(approximate='none') + (fc2): Linear(in_features=3072, out_features=768, bias=True) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (1): Block( + (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True) + (attn): Attention( + (qkv): Linear(in_features=768, out_features=2304, bias=True) + (attn_drop): Dropout(p=0.0, inplace=False) + (proj): Linear(in_features=768, out_features=768, bias=True) + (proj_drop): Dropout(p=0.0, inplace=False) + ) + (drop_path): Identity() + (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True) + (mlp): FFN( + (fc1): Linear(in_features=768, out_features=3072, bias=True) + (act): GELU(approximate='none') + (fc2): Linear(in_features=3072, out_features=768, bias=True) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (2): Block( + (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True) + (attn): Attention( + (qkv): Linear(in_features=768, out_features=2304, bias=True) + (attn_drop): Dropout(p=0.0, inplace=False) + (proj): Linear(in_features=768, out_features=768, bias=True) + (proj_drop): Dropout(p=0.0, inplace=False) + ) + (drop_path): Identity() + (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True) + (mlp): FFN( + (fc1): Linear(in_features=768, out_features=3072, bias=True) + (act): GELU(approximate='none') + (fc2): Linear(in_features=3072, out_features=768, bias=True) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (3): Block( + (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True) + (attn): Attention( + (qkv): Linear(in_features=768, out_features=2304, bias=True) + (attn_drop): Dropout(p=0.0, inplace=False) + (proj): Linear(in_features=768, out_features=768, bias=True) + (proj_drop): Dropout(p=0.0, inplace=False) + ) + (drop_path): Identity() + (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True) + (mlp): FFN( + (fc1): Linear(in_features=768, out_features=3072, bias=True) + (act): GELU(approximate='none') + (fc2): Linear(in_features=3072, out_features=768, bias=True) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (4): Block( + (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True) + (attn): Attention( + (qkv): Linear(in_features=768, out_features=2304, bias=True) + (attn_drop): Dropout(p=0.0, inplace=False) + (proj): Linear(in_features=768, out_features=768, bias=True) + (proj_drop): Dropout(p=0.0, inplace=False) + ) + (drop_path): Identity() + (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True) + (mlp): FFN( + (fc1): Linear(in_features=768, out_features=3072, bias=True) + (act): GELU(approximate='none') + (fc2): Linear(in_features=3072, out_features=768, bias=True) + (drop): 
Dropout(p=0.0, inplace=False) + ) + ) + (5): Block( + (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True) + (attn): Attention( + (qkv): Linear(in_features=768, out_features=2304, bias=True) + (attn_drop): Dropout(p=0.0, inplace=False) + (proj): Linear(in_features=768, out_features=768, bias=True) + (proj_drop): Dropout(p=0.0, inplace=False) + ) + (drop_path): Identity() + (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True) + (mlp): FFN( + (fc1): Linear(in_features=768, out_features=3072, bias=True) + (act): GELU(approximate='none') + (fc2): Linear(in_features=3072, out_features=768, bias=True) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (6): Block( + (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True) + (attn): Attention( + (qkv): Linear(in_features=768, out_features=2304, bias=True) + (attn_drop): Dropout(p=0.0, inplace=False) + (proj): Linear(in_features=768, out_features=768, bias=True) + (proj_drop): Dropout(p=0.0, inplace=False) + ) + (drop_path): Identity() + (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True) + (mlp): FFN( + (fc1): Linear(in_features=768, out_features=3072, bias=True) + (act): GELU(approximate='none') + (fc2): Linear(in_features=3072, out_features=768, bias=True) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (7): Block( + (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True) + (attn): Attention( + (qkv): Linear(in_features=768, out_features=2304, bias=True) + (attn_drop): Dropout(p=0.0, inplace=False) + (proj): Linear(in_features=768, out_features=768, bias=True) + (proj_drop): Dropout(p=0.0, inplace=False) + ) + (drop_path): Identity() + (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True) + (mlp): FFN( + (fc1): Linear(in_features=768, out_features=3072, bias=True) + (act): GELU(approximate='none') + (fc2): Linear(in_features=3072, out_features=768, bias=True) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (8): Block( + (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True) + (attn): Attention( + (qkv): Linear(in_features=768, out_features=2304, bias=True) + (attn_drop): Dropout(p=0.0, inplace=False) + (proj): Linear(in_features=768, out_features=768, bias=True) + (proj_drop): Dropout(p=0.0, inplace=False) + ) + (drop_path): Identity() + (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True) + (mlp): FFN( + (fc1): Linear(in_features=768, out_features=3072, bias=True) + (act): GELU(approximate='none') + (fc2): Linear(in_features=3072, out_features=768, bias=True) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (9): Block( + (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True) + (attn): Attention( + (qkv): Linear(in_features=768, out_features=2304, bias=True) + (attn_drop): Dropout(p=0.0, inplace=False) + (proj): Linear(in_features=768, out_features=768, bias=True) + (proj_drop): Dropout(p=0.0, inplace=False) + ) + (drop_path): Identity() + (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True) + (mlp): FFN( + (fc1): Linear(in_features=768, out_features=3072, bias=True) + (act): GELU(approximate='none') + (fc2): Linear(in_features=3072, out_features=768, bias=True) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (10): Block( + (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True) + (attn): Attention( + (qkv): Linear(in_features=768, out_features=2304, bias=True) + (attn_drop): Dropout(p=0.0, inplace=False) + (proj): Linear(in_features=768, out_features=768, bias=True) + (proj_drop): Dropout(p=0.0, inplace=False) + ) + (drop_path): Identity() + 
(norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True) + (mlp): FFN( + (fc1): Linear(in_features=768, out_features=3072, bias=True) + (act): GELU(approximate='none') + (fc2): Linear(in_features=3072, out_features=768, bias=True) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + (11): Block( + (norm1): LayerNorm((768,), eps=1e-06, elementwise_affine=True) + (attn): Attention( + (qkv): Linear(in_features=768, out_features=2304, bias=True) + (attn_drop): Dropout(p=0.0, inplace=False) + (proj): Linear(in_features=768, out_features=768, bias=True) + (proj_drop): Dropout(p=0.0, inplace=False) + ) + (drop_path): Identity() + (norm2): LayerNorm((768,), eps=1e-06, elementwise_affine=True) + (mlp): FFN( + (fc1): Linear(in_features=768, out_features=3072, bias=True) + (act): GELU(approximate='none') + (fc2): Linear(in_features=3072, out_features=768, bias=True) + (drop): Dropout(p=0.0, inplace=False) + ) + ) + ) + (norm): LayerNorm((768,), eps=1e-06, elementwise_affine=True) + ) + (decoder): Sequential( + (0): Conv2d(768, 768, kernel_size=(1, 1), stride=(1, 1)) + (1): PixelShuffle(upscale_factor=16) + ) +) +[2023-10-10 03:04:02 simmim_pretrain](optimizer.py 22): INFO >>>>>>>>>> Build Optimizer for Pre-training Stage +[2023-10-10 03:04:02 simmim_pretrain](optimizer.py 27): INFO No weight decay: {} +[2023-10-10 03:04:02 simmim_pretrain](optimizer.py 30): INFO No weight decay keywords: {} +[2023-10-10 03:04:02 simmim_pretrain](optimizer.py 63): INFO No decay params: ['encoder.patch_embed.proj.bias', 'encoder.blocks.0.norm1.weight', 'encoder.blocks.0.norm1.bias', 'encoder.blocks.0.attn.qkv.bias', 'encoder.blocks.0.attn.proj.bias', 'encoder.blocks.0.norm2.weight', 'encoder.blocks.0.norm2.bias', 'encoder.blocks.0.mlp.fc1.bias', 'encoder.blocks.0.mlp.fc2.bias', 'encoder.blocks.1.norm1.weight', 'encoder.blocks.1.norm1.bias', 'encoder.blocks.1.attn.qkv.bias', 'encoder.blocks.1.attn.proj.bias', 'encoder.blocks.1.norm2.weight', 'encoder.blocks.1.norm2.bias', 'encoder.blocks.1.mlp.fc1.bias', 'encoder.blocks.1.mlp.fc2.bias', 'encoder.blocks.2.norm1.weight', 'encoder.blocks.2.norm1.bias', 'encoder.blocks.2.attn.qkv.bias', 'encoder.blocks.2.attn.proj.bias', 'encoder.blocks.2.norm2.weight', 'encoder.blocks.2.norm2.bias', 'encoder.blocks.2.mlp.fc1.bias', 'encoder.blocks.2.mlp.fc2.bias', 'encoder.blocks.3.norm1.weight', 'encoder.blocks.3.norm1.bias', 'encoder.blocks.3.attn.qkv.bias', 'encoder.blocks.3.attn.proj.bias', 'encoder.blocks.3.norm2.weight', 'encoder.blocks.3.norm2.bias', 'encoder.blocks.3.mlp.fc1.bias', 'encoder.blocks.3.mlp.fc2.bias', 'encoder.blocks.4.norm1.weight', 'encoder.blocks.4.norm1.bias', 'encoder.blocks.4.attn.qkv.bias', 'encoder.blocks.4.attn.proj.bias', 'encoder.blocks.4.norm2.weight', 'encoder.blocks.4.norm2.bias', 'encoder.blocks.4.mlp.fc1.bias', 'encoder.blocks.4.mlp.fc2.bias', 'encoder.blocks.5.norm1.weight', 'encoder.blocks.5.norm1.bias', 'encoder.blocks.5.attn.qkv.bias', 'encoder.blocks.5.attn.proj.bias', 'encoder.blocks.5.norm2.weight', 'encoder.blocks.5.norm2.bias', 'encoder.blocks.5.mlp.fc1.bias', 'encoder.blocks.5.mlp.fc2.bias', 'encoder.blocks.6.norm1.weight', 'encoder.blocks.6.norm1.bias', 'encoder.blocks.6.attn.qkv.bias', 'encoder.blocks.6.attn.proj.bias', 'encoder.blocks.6.norm2.weight', 'encoder.blocks.6.norm2.bias', 'encoder.blocks.6.mlp.fc1.bias', 'encoder.blocks.6.mlp.fc2.bias', 'encoder.blocks.7.norm1.weight', 'encoder.blocks.7.norm1.bias', 'encoder.blocks.7.attn.qkv.bias', 'encoder.blocks.7.attn.proj.bias', 'encoder.blocks.7.norm2.weight', 
'encoder.blocks.7.norm2.bias', 'encoder.blocks.7.mlp.fc1.bias', 'encoder.blocks.7.mlp.fc2.bias', 'encoder.blocks.8.norm1.weight', 'encoder.blocks.8.norm1.bias', 'encoder.blocks.8.attn.qkv.bias', 'encoder.blocks.8.attn.proj.bias', 'encoder.blocks.8.norm2.weight', 'encoder.blocks.8.norm2.bias', 'encoder.blocks.8.mlp.fc1.bias', 'encoder.blocks.8.mlp.fc2.bias', 'encoder.blocks.9.norm1.weight', 'encoder.blocks.9.norm1.bias', 'encoder.blocks.9.attn.qkv.bias', 'encoder.blocks.9.attn.proj.bias', 'encoder.blocks.9.norm2.weight', 'encoder.blocks.9.norm2.bias', 'encoder.blocks.9.mlp.fc1.bias', 'encoder.blocks.9.mlp.fc2.bias', 'encoder.blocks.10.norm1.weight', 'encoder.blocks.10.norm1.bias', 'encoder.blocks.10.attn.qkv.bias', 'encoder.blocks.10.attn.proj.bias', 'encoder.blocks.10.norm2.weight', 'encoder.blocks.10.norm2.bias', 'encoder.blocks.10.mlp.fc1.bias', 'encoder.blocks.10.mlp.fc2.bias', 'encoder.blocks.11.norm1.weight', 'encoder.blocks.11.norm1.bias', 'encoder.blocks.11.attn.qkv.bias', 'encoder.blocks.11.attn.proj.bias', 'encoder.blocks.11.norm2.weight', 'encoder.blocks.11.norm2.bias', 'encoder.blocks.11.mlp.fc1.bias', 'encoder.blocks.11.mlp.fc2.bias', 'encoder.norm.weight', 'encoder.norm.bias', 'decoder.0.bias'] +[2023-10-10 03:04:02 simmim_pretrain](optimizer.py 64): INFO Has decay params: ['encoder.cls_token', 'encoder.pos_embed', 'encoder.mask_token', 'encoder.patch_embed.proj.weight', 'encoder.blocks.0.attn.qkv.weight', 'encoder.blocks.0.attn.proj.weight', 'encoder.blocks.0.mlp.fc1.weight', 'encoder.blocks.0.mlp.fc2.weight', 'encoder.blocks.1.attn.qkv.weight', 'encoder.blocks.1.attn.proj.weight', 'encoder.blocks.1.mlp.fc1.weight', 'encoder.blocks.1.mlp.fc2.weight', 'encoder.blocks.2.attn.qkv.weight', 'encoder.blocks.2.attn.proj.weight', 'encoder.blocks.2.mlp.fc1.weight', 'encoder.blocks.2.mlp.fc2.weight', 'encoder.blocks.3.attn.qkv.weight', 'encoder.blocks.3.attn.proj.weight', 'encoder.blocks.3.mlp.fc1.weight', 'encoder.blocks.3.mlp.fc2.weight', 'encoder.blocks.4.attn.qkv.weight', 'encoder.blocks.4.attn.proj.weight', 'encoder.blocks.4.mlp.fc1.weight', 'encoder.blocks.4.mlp.fc2.weight', 'encoder.blocks.5.attn.qkv.weight', 'encoder.blocks.5.attn.proj.weight', 'encoder.blocks.5.mlp.fc1.weight', 'encoder.blocks.5.mlp.fc2.weight', 'encoder.blocks.6.attn.qkv.weight', 'encoder.blocks.6.attn.proj.weight', 'encoder.blocks.6.mlp.fc1.weight', 'encoder.blocks.6.mlp.fc2.weight', 'encoder.blocks.7.attn.qkv.weight', 'encoder.blocks.7.attn.proj.weight', 'encoder.blocks.7.mlp.fc1.weight', 'encoder.blocks.7.mlp.fc2.weight', 'encoder.blocks.8.attn.qkv.weight', 'encoder.blocks.8.attn.proj.weight', 'encoder.blocks.8.mlp.fc1.weight', 'encoder.blocks.8.mlp.fc2.weight', 'encoder.blocks.9.attn.qkv.weight', 'encoder.blocks.9.attn.proj.weight', 'encoder.blocks.9.mlp.fc1.weight', 'encoder.blocks.9.mlp.fc2.weight', 'encoder.blocks.10.attn.qkv.weight', 'encoder.blocks.10.attn.proj.weight', 'encoder.blocks.10.mlp.fc1.weight', 'encoder.blocks.10.mlp.fc2.weight', 'encoder.blocks.11.attn.qkv.weight', 'encoder.blocks.11.attn.proj.weight', 'encoder.blocks.11.mlp.fc1.weight', 'encoder.blocks.11.mlp.fc2.weight', 'decoder.0.weight'] +[2023-10-10 03:04:02 simmim_pretrain](optimizer.py 43): INFO AdamW ( +Parameter Group 0 + amsgrad: False + betas: (0.9, 0.999) + capturable: False + differentiable: False + eps: 1e-08 + foreach: None + fused: None + lr: 0.0002 + maximize: False + weight_decay: 0.05 + +Parameter Group 1 + amsgrad: False + betas: (0.9, 0.999) + capturable: False + differentiable: False + eps: 1e-08 + foreach: None + 
fused: None + lr: 0.0002 + maximize: False + weight_decay: 0.0 +) +[2023-10-10 03:04:02 simmim_pretrain](main_simmim.py 120): INFO number of params: 86390016 +[2023-10-10 03:04:02 simmim_pretrain](utils.py 83): INFO All checkpoints founded in /root/autodl-tmp/LSQ-simmim/checkpoint/simmim_pretrain/vit_simmim: [] +[2023-10-10 03:04:02 simmim_pretrain](main_simmim.py 137): INFO no checkpoint found in /root/autodl-tmp/LSQ-simmim/checkpoint/simmim_pretrain/vit_simmim, ignoring auto resume +[2023-10-10 03:04:02 simmim_pretrain](main_simmim.py 145): INFO Start training +/root/miniconda3/lib/python3.8/site-packages/torch/autograd/__init__.py:200: UserWarning: Grad strides do not match bucket view strides. This may indicate grad was not created according to the gradient layout contract, or that the param's strides changed since DDP was constructed. This is not an error, but may impair performance. +grad.sizes() = [768, 768, 1, 1], strides() = [768, 1, 768, 768] +bucket_view.sizes() = [768, 768, 1, 1], strides() = [768, 1, 1, 1] (Triggered internally at ../torch/csrc/distributed/c10d/reducer.cpp:323.) + Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass +/root/miniconda3/lib/python3.8/site-packages/torch/autograd/__init__.py:200: UserWarning: Grad strides do not match bucket view strides. This may indicate grad was not created according to the gradient layout contract, or that the param's strides changed since DDP was constructed. This is not an error, but may impair performance. +grad.sizes() = [768, 768, 1, 1], strides() = [768, 1, 768, 768] +bucket_view.sizes() = [768, 768, 1, 1], strides() = [768, 1, 1, 1] (Triggered internally at ../torch/csrc/distributed/c10d/reducer.cpp:323.) + Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass +/root/miniconda3/lib/python3.8/site-packages/torch/autograd/__init__.py:200: UserWarning: Grad strides do not match bucket view strides. This may indicate grad was not created according to the gradient layout contract, or that the param's strides changed since DDP was constructed. This is not an error, but may impair performance. +grad.sizes() = [768, 768, 1, 1], strides() = [768, 1, 768, 768] +bucket_view.sizes() = [768, 768, 1, 1], strides() = [768, 1, 1, 1] (Triggered internally at ../torch/csrc/distributed/c10d/reducer.cpp:323.) + Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass +/root/miniconda3/lib/python3.8/site-packages/torch/autograd/__init__.py:200: UserWarning: Grad strides do not match bucket view strides. This may indicate grad was not created according to the gradient layout contract, or that the param's strides changed since DDP was constructed. This is not an error, but may impair performance. +grad.sizes() = [768, 768, 1, 1], strides() = [768, 1, 768, 768] +bucket_view.sizes() = [768, 768, 1, 1], strides() = [768, 1, 1, 1] (Triggered internally at ../torch/csrc/distributed/c10d/reducer.cpp:323.) 
+[2023-10-10 03:04:07 simmim_pretrain](main_simmim.py 218): INFO Train: [0/200][0/6787] eta 8:30:41 lr 0.000001 time 4.5148 (4.5148) loss 1.7152 (1.7152) grad_norm 553505.6250 (553505.6250) mem 13533MB
+[2023-10-10 03:06:09 simmim_pretrain](main_simmim.py 218): INFO Train: [0/200][500/6787] eta 0:26:26 lr 0.000002 time 0.2444 (0.2523) loss 0.6251 (0.8950) grad_norm 25038.8047 (100184.2969) mem 14543MB
+[2023-10-10 03:08:12 simmim_pretrain](main_simmim.py 218): INFO Train: [0/200][1000/6787] eta 0:24:04 lr 0.000002 time 0.2562 (0.2497) loss 0.5471 (0.7294) grad_norm 23971.2754 (64596.7500) mem 14543MB
+[2023-10-10 03:10:17 simmim_pretrain](main_simmim.py 218): INFO Train: [0/200][1500/6787] eta 0:22:00 lr 0.000003 time 0.2493 (0.2498) loss 0.5397 (0.6632) grad_norm 59519.0664 (58565.0820) mem 14543MB
+[2023-10-10 03:12:22 simmim_pretrain](main_simmim.py 218): INFO Train: [0/200][2000/6787] eta 0:19:56 lr 0.000004 time 0.2587 (0.2500) loss 0.5102 (0.6263) grad_norm 240317.0781 (71993.7969) mem 14543MB
+[2023-10-10 03:14:28 simmim_pretrain](main_simmim.py 218): INFO Train: [0/200][2500/6787] eta 0:17:52 lr 0.000005 time 0.2461 (0.2501) loss 0.4838 (0.6021) grad_norm 154843.9688 (98793.8047) mem 14543MB
+[2023-10-10 03:16:33 simmim_pretrain](main_simmim.py 218): INFO Train: [0/200][3000/6787] eta 0:15:47 lr 0.000005 time 0.2448 (0.2501) loss 0.5123 (0.5849) grad_norm 541804.3750 (134192.4844) mem 14543MB
+[2023-10-10 03:18:38 simmim_pretrain](main_simmim.py 218): INFO Train: [0/200][3500/6787] eta 0:13:42 lr 0.000006 time 0.2503 (0.2502) loss 0.5012 (0.5723) grad_norm 501434.0938 (172352.0156) mem 14543MB
+[2023-10-10 03:20:44 simmim_pretrain](main_simmim.py 218): INFO Train: [0/200][4000/6787] eta 0:11:37 lr 0.000007 time 0.2492 (0.2503) loss 0.4889 (0.5626) grad_norm 609606.8750 (195628.6562) mem 14543MB
+[2023-10-10 03:22:50 simmim_pretrain](main_simmim.py 218): INFO Train: [0/200][4500/6787] eta 0:09:32 lr 0.000008 time 0.2598 (0.2504) loss 0.4657 (0.5546) grad_norm 821053.7500 (206329.3281) mem 14543MB
+[2023-10-10 03:24:59 simmim_pretrain](main_simmim.py 218): INFO Train: [0/200][5000/6787] eta 0:07:29 lr 0.000008 time 0.2599 (0.2513) loss 0.4856 (0.5479) grad_norm 359824.9688 (230155.2344) mem 14543MB
+[2023-10-10 03:27:05 simmim_pretrain](main_simmim.py 218): INFO Train: [0/200][5500/6787] eta 0:05:23 lr 0.000009 time 0.2514 (0.2514) loss 0.4633 (0.5422) grad_norm 816823.6875 (253836.1719) mem 14543MB
+[2023-10-10 03:29:10 simmim_pretrain](main_simmim.py 218): INFO Train: [0/200][6000/6787] eta 0:03:17 lr 0.000010 time 0.2589 (0.2513) loss 0.4895 (0.5374) grad_norm 809127.5625 (266184.0938) mem 14543MB
+[2023-10-10 03:31:16 simmim_pretrain](main_simmim.py 218): INFO Train: [0/200][6500/6787] eta 0:01:12 lr 0.000011 time 0.2519 (0.2513) loss 0.5012 (0.5330) grad_norm 980117.6875 (inf) mem 14543MB
+[2023-10-10 03:32:28 simmim_pretrain](main_simmim.py 228): INFO EPOCH 0 training takes 0:28:26
+[2023-10-10 03:32:28 simmim_pretrain](utils.py 62): INFO /root/autodl-tmp/LSQ-simmim/checkpoint/simmim_pretrain/vit_simmim/ckpt_epoch_0.pth saving......
+[2023-10-10 03:32:29 simmim_pretrain](utils.py 64): INFO /root/autodl-tmp/LSQ-simmim/checkpoint/simmim_pretrain/vit_simmim/ckpt_epoch_0.pth saved !!!
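The lr column in the epoch-0 records above rises from 0.000001 and, as the later epochs show, reaches the 0.0002 base rate exactly when epoch 20 begins: a linear per-iteration warmup over the first 20 epochs. A minimal sketch of that schedule follows; the function name and the post-warmup plateau are illustrative assumptions (the repo's scheduler may decay the rate after warmup), not a quotation of its lr_scheduler.py:

def warmup_lr(step: int, steps_per_epoch: int = 6787, warmup_epochs: int = 20,
              warmup_init: float = 1e-6, base_lr: float = 2e-4) -> float:
    # Linear per-step warmup consistent with the logged lr column:
    # 0.000001 at [0/200][0/6787], ~0.000101 entering epoch 10,
    # 0.000200 from epoch 20 onward.
    warmup_steps = warmup_epochs * steps_per_epoch
    if step >= warmup_steps:
        return base_lr
    return warmup_init + (base_lr - warmup_init) * step / warmup_steps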
[Epochs 1 through 21 continue in the identical per-500-iteration format and are condensed here. Each epoch runs its 6787 iterations in 28:21 to 28:41 at an average of ~0.251 s/iteration and 14543MB of GPU memory. The learning rate continues its linear warmup (0.000011 entering epoch 1, 0.000101 entering epoch 10, 0.000200 entering epoch 20, constant thereafter). The running-average loss falls steadily: 0.4467 (epoch 1), 0.4290 (2), 0.4212 (3), 0.4118 (4), 0.4024 (5), 0.3959 (6), 0.3918 (7), 0.3892 (8), 0.3861 (9), 0.3844 (10), 0.3810 (11), 0.3787 (12), 0.3774 (13), 0.3765 (14), 0.3752 (15), 0.3740 (16), 0.3741 (17), 0.3728 (18), 0.3728 (19), 0.3718 (20), and 0.3711 for epoch 21 as of its iteration-5000 record, reproduced below. The running-average grad_norm repeatedly saturates to inf (and reads nan from epoch 4 iteration 1000 through the end of that epoch) even though the per-iteration values stay in the 1e5 to 1e6 range. ckpt_epoch_20.pth is saved under /root/autodl-tmp/LSQ-simmim/checkpoint/simmim_pretrain/vit_simmim after epoch 20, and the log proceeds into epoch 21:]
+[2023-10-10 13:22:17 simmim_pretrain](main_simmim.py 218): INFO Train: [21/200][5000/6787] eta 0:07:30 lr 0.000200 time 0.2606 (0.2520) loss 0.3807 (0.3711) grad_norm 592382.5000 (inf) mem
14543MB +[2023-10-10 13:24:27 simmim_pretrain](main_simmim.py 218): INFO Train: [21/200][5500/6787] eta 0:05:25 lr 0.000200 time 0.2603 (0.2527) loss 0.3903 (0.3711) grad_norm 286478.8125 (inf) mem 14543MB +[2023-10-10 13:26:37 simmim_pretrain](main_simmim.py 218): INFO Train: [21/200][6000/6787] eta 0:03:19 lr 0.000200 time 0.2598 (0.2533) loss 0.3667 (0.3710) grad_norm 451122.3438 (inf) mem 14543MB +[2023-10-10 13:28:47 simmim_pretrain](main_simmim.py 218): INFO Train: [21/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2524 (0.2537) loss 0.3643 (0.3710) grad_norm 154783.6250 (inf) mem 14543MB +[2023-10-10 13:30:02 simmim_pretrain](main_simmim.py 228): INFO EPOCH 21 training takes 0:28:44 +[2023-10-10 13:30:03 simmim_pretrain](main_simmim.py 218): INFO Train: [22/200][0/6787] eta 2:31:40 lr 0.000200 time 1.3409 (1.3409) loss 0.3810 (0.3810) grad_norm 289879.5938 (289879.5938) mem 14543MB +[2023-10-10 13:32:09 simmim_pretrain](main_simmim.py 218): INFO Train: [22/200][500/6787] eta 0:26:35 lr 0.000200 time 0.2547 (0.2538) loss 0.3609 (0.3736) grad_norm 270675.0312 (268461.7188) mem 14543MB +[2023-10-10 13:34:15 simmim_pretrain](main_simmim.py 218): INFO Train: [22/200][1000/6787] eta 0:24:23 lr 0.000200 time 0.2518 (0.2529) loss 0.3704 (0.3732) grad_norm 394348.3750 (259644.0938) mem 14543MB +[2023-10-10 13:36:20 simmim_pretrain](main_simmim.py 218): INFO Train: [22/200][1500/6787] eta 0:22:14 lr 0.000200 time 0.2502 (0.2525) loss 0.3590 (0.3720) grad_norm 193207.6562 (273961.2812) mem 14543MB +[2023-10-10 13:38:26 simmim_pretrain](main_simmim.py 218): INFO Train: [22/200][2000/6787] eta 0:20:07 lr 0.000200 time 0.2446 (0.2522) loss 0.3753 (0.3715) grad_norm 227379.9531 (283740.4375) mem 14543MB +[2023-10-10 13:40:32 simmim_pretrain](main_simmim.py 218): INFO Train: [22/200][2500/6787] eta 0:18:00 lr 0.000200 time 0.2485 (0.2520) loss 0.3821 (0.3711) grad_norm 520665.6875 (300077.6875) mem 14543MB +[2023-10-10 13:42:37 simmim_pretrain](main_simmim.py 218): INFO Train: [22/200][3000/6787] eta 0:15:53 lr 0.000200 time 0.2529 (0.2519) loss 0.3750 (0.3708) grad_norm 405808.3438 (317226.6250) mem 14543MB +[2023-10-10 13:44:43 simmim_pretrain](main_simmim.py 218): INFO Train: [22/200][3500/6787] eta 0:13:47 lr 0.000200 time 0.2467 (0.2518) loss 0.3653 (0.3706) grad_norm 303378.8438 (inf) mem 14543MB +[2023-10-10 13:46:49 simmim_pretrain](main_simmim.py 218): INFO Train: [22/200][4000/6787] eta 0:11:41 lr 0.000200 time 0.2515 (0.2518) loss 0.3600 (0.3705) grad_norm 269864.5000 (inf) mem 14543MB +[2023-10-10 13:48:55 simmim_pretrain](main_simmim.py 218): INFO Train: [22/200][4500/6787] eta 0:09:35 lr 0.000200 time 0.2494 (0.2517) loss 0.3691 (0.3705) grad_norm 742575.8750 (inf) mem 14543MB +[2023-10-10 13:51:00 simmim_pretrain](main_simmim.py 218): INFO Train: [22/200][5000/6787] eta 0:07:29 lr 0.000200 time 0.2490 (0.2516) loss 0.3680 (0.3708) grad_norm 341472.4062 (inf) mem 14543MB +[2023-10-10 13:53:06 simmim_pretrain](main_simmim.py 218): INFO Train: [22/200][5500/6787] eta 0:05:23 lr 0.000200 time 0.2560 (0.2516) loss 0.3643 (0.3709) grad_norm 281018.0312 (inf) mem 14543MB +[2023-10-10 13:55:13 simmim_pretrain](main_simmim.py 218): INFO Train: [22/200][6000/6787] eta 0:03:18 lr 0.000200 time 0.2464 (0.2518) loss 0.3654 (0.3711) grad_norm 255319.9219 (inf) mem 14543MB +[2023-10-10 13:57:21 simmim_pretrain](main_simmim.py 218): INFO Train: [22/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2570 (0.2522) loss 0.3789 (0.3712) grad_norm 211942.9688 (inf) mem 14543MB +[2023-10-10 13:58:35 
simmim_pretrain](main_simmim.py 228): INFO EPOCH 22 training takes 0:28:33 +[2023-10-10 13:58:36 simmim_pretrain](main_simmim.py 218): INFO Train: [23/200][0/6787] eta 2:41:54 lr 0.000200 time 1.4313 (1.4313) loss 0.3728 (0.3728) grad_norm 229706.9688 (229706.9688) mem 14543MB +[2023-10-10 14:00:44 simmim_pretrain](main_simmim.py 218): INFO Train: [23/200][500/6787] eta 0:27:05 lr 0.000200 time 0.2596 (0.2585) loss 0.3633 (0.3691) grad_norm 414433.1562 (420590.0000) mem 14543MB +[2023-10-10 14:02:53 simmim_pretrain](main_simmim.py 218): INFO Train: [23/200][1000/6787] eta 0:24:51 lr 0.000200 time 0.2571 (0.2577) loss 0.3611 (0.3691) grad_norm 436161.5938 (404555.8125) mem 14543MB +[2023-10-10 14:05:01 simmim_pretrain](main_simmim.py 218): INFO Train: [23/200][1500/6787] eta 0:22:40 lr 0.000200 time 0.2550 (0.2573) loss 0.3597 (0.3696) grad_norm 752669.5000 (415470.2188) mem 14543MB +[2023-10-10 14:07:10 simmim_pretrain](main_simmim.py 218): INFO Train: [23/200][2000/6787] eta 0:20:31 lr 0.000200 time 0.2558 (0.2572) loss 0.3747 (0.3697) grad_norm 398325.4375 (440738.3750) mem 14543MB +[2023-10-10 14:09:18 simmim_pretrain](main_simmim.py 218): INFO Train: [23/200][2500/6787] eta 0:18:22 lr 0.000200 time 0.2560 (0.2572) loss 0.3679 (0.3695) grad_norm 380331.3125 (inf) mem 14543MB +[2023-10-10 14:11:27 simmim_pretrain](main_simmim.py 218): INFO Train: [23/200][3000/6787] eta 0:16:13 lr 0.000200 time 0.2596 (0.2572) loss 0.3509 (0.3693) grad_norm 256807.2188 (inf) mem 14543MB +[2023-10-10 14:13:35 simmim_pretrain](main_simmim.py 218): INFO Train: [23/200][3500/6787] eta 0:14:04 lr 0.000200 time 0.2601 (0.2570) loss 0.3745 (0.3693) grad_norm 368639.8750 (inf) mem 14543MB +[2023-10-10 14:15:43 simmim_pretrain](main_simmim.py 218): INFO Train: [23/200][4000/6787] eta 0:11:56 lr 0.000200 time 0.2542 (0.2570) loss 0.3767 (0.3694) grad_norm 232656.0156 (inf) mem 14543MB +[2023-10-10 14:17:51 simmim_pretrain](main_simmim.py 218): INFO Train: [23/200][4500/6787] eta 0:09:47 lr 0.000200 time 0.2599 (0.2569) loss 0.3766 (0.3694) grad_norm 519942.1250 (inf) mem 14543MB +[2023-10-10 14:20:00 simmim_pretrain](main_simmim.py 218): INFO Train: [23/200][5000/6787] eta 0:07:39 lr 0.000200 time 0.2558 (0.2570) loss 0.3692 (0.3694) grad_norm 196221.0000 (inf) mem 14543MB +[2023-10-10 14:22:09 simmim_pretrain](main_simmim.py 218): INFO Train: [23/200][5500/6787] eta 0:05:30 lr 0.000200 time 0.2588 (0.2570) loss 0.3740 (0.3694) grad_norm 454338.6250 (inf) mem 14543MB +[2023-10-10 14:24:17 simmim_pretrain](main_simmim.py 218): INFO Train: [23/200][6000/6787] eta 0:03:22 lr 0.000200 time 0.2586 (0.2570) loss 0.3660 (0.3694) grad_norm 584048.8125 (inf) mem 14543MB +[2023-10-10 14:26:27 simmim_pretrain](main_simmim.py 218): INFO Train: [23/200][6500/6787] eta 0:01:13 lr 0.000200 time 0.2674 (0.2572) loss 0.3715 (0.3694) grad_norm 496958.9688 (inf) mem 14543MB +[2023-10-10 14:27:41 simmim_pretrain](main_simmim.py 228): INFO EPOCH 23 training takes 0:29:06 +[2023-10-10 14:27:42 simmim_pretrain](main_simmim.py 218): INFO Train: [24/200][0/6787] eta 2:32:36 lr 0.000200 time 1.3492 (1.3492) loss 0.3663 (0.3663) grad_norm 379275.5000 (379275.5000) mem 14543MB +[2023-10-10 14:29:51 simmim_pretrain](main_simmim.py 218): INFO Train: [24/200][500/6787] eta 0:27:15 lr 0.000200 time 0.2597 (0.2602) loss 0.3696 (0.3697) grad_norm 483015.3125 (464004.2188) mem 14543MB +[2023-10-10 14:32:00 simmim_pretrain](main_simmim.py 218): INFO Train: [24/200][1000/6787] eta 0:24:58 lr 0.000200 time 0.2615 (0.2590) loss 0.3676 (0.3705) 
grad_norm 348116.3750 (inf) mem 14543MB +[2023-10-10 14:34:10 simmim_pretrain](main_simmim.py 218): INFO Train: [24/200][1500/6787] eta 0:22:48 lr 0.000200 time 0.2604 (0.2589) loss 0.3557 (0.3712) grad_norm 233333.4531 (inf) mem 14543MB +[2023-10-10 14:36:18 simmim_pretrain](main_simmim.py 218): INFO Train: [24/200][2000/6787] eta 0:20:37 lr 0.000200 time 0.2571 (0.2586) loss 0.3783 (0.3716) grad_norm 469070.0938 (inf) mem 14543MB +[2023-10-10 14:38:28 simmim_pretrain](main_simmim.py 218): INFO Train: [24/200][2500/6787] eta 0:18:28 lr 0.000200 time 0.2593 (0.2585) loss 0.3658 (0.3714) grad_norm 397583.6875 (inf) mem 14543MB +[2023-10-10 14:40:37 simmim_pretrain](main_simmim.py 218): INFO Train: [24/200][3000/6787] eta 0:16:19 lr 0.000200 time 0.2541 (0.2585) loss 0.3706 (0.3713) grad_norm 334887.9062 (inf) mem 14543MB +[2023-10-10 14:42:46 simmim_pretrain](main_simmim.py 218): INFO Train: [24/200][3500/6787] eta 0:14:10 lr 0.000200 time 0.2584 (0.2586) loss 0.3691 (0.3709) grad_norm 303614.9062 (inf) mem 14543MB +[2023-10-10 14:44:56 simmim_pretrain](main_simmim.py 218): INFO Train: [24/200][4000/6787] eta 0:12:00 lr 0.000200 time 0.2583 (0.2586) loss 0.3598 (0.3708) grad_norm 268938.7188 (inf) mem 14543MB +[2023-10-10 14:47:05 simmim_pretrain](main_simmim.py 218): INFO Train: [24/200][4500/6787] eta 0:09:51 lr 0.000200 time 0.2702 (0.2585) loss 0.3562 (0.3709) grad_norm 341748.3438 (inf) mem 14543MB +[2023-10-10 14:49:14 simmim_pretrain](main_simmim.py 218): INFO Train: [24/200][5000/6787] eta 0:07:41 lr 0.000200 time 0.2560 (0.2585) loss 0.3705 (0.3709) grad_norm 261469.5469 (inf) mem 14543MB +[2023-10-10 14:51:23 simmim_pretrain](main_simmim.py 218): INFO Train: [24/200][5500/6787] eta 0:05:32 lr 0.000200 time 0.2587 (0.2585) loss 0.3703 (0.3709) grad_norm 298047.9688 (inf) mem 14543MB +[2023-10-10 14:53:32 simmim_pretrain](main_simmim.py 218): INFO Train: [24/200][6000/6787] eta 0:03:23 lr 0.000200 time 0.2491 (0.2585) loss 0.3607 (0.3708) grad_norm 342671.8438 (inf) mem 14543MB +[2023-10-10 14:55:42 simmim_pretrain](main_simmim.py 218): INFO Train: [24/200][6500/6787] eta 0:01:14 lr 0.000200 time 0.2568 (0.2585) loss 0.3590 (0.3706) grad_norm 449279.7188 (inf) mem 14543MB +[2023-10-10 14:56:56 simmim_pretrain](main_simmim.py 228): INFO EPOCH 24 training takes 0:29:15 +[2023-10-10 14:56:58 simmim_pretrain](main_simmim.py 218): INFO Train: [25/200][0/6787] eta 2:19:38 lr 0.000200 time 1.2346 (1.2346) loss 0.3524 (0.3524) grad_norm 642508.6250 (642508.6250) mem 14543MB +[2023-10-10 14:59:07 simmim_pretrain](main_simmim.py 218): INFO Train: [25/200][500/6787] eta 0:27:13 lr 0.000200 time 0.2540 (0.2598) loss 0.3810 (0.3692) grad_norm 483071.6875 (447253.1562) mem 14543MB +[2023-10-10 15:01:15 simmim_pretrain](main_simmim.py 218): INFO Train: [25/200][1000/6787] eta 0:24:56 lr 0.000200 time 0.2555 (0.2586) loss 0.3376 (0.3692) grad_norm 475614.5625 (inf) mem 14543MB +[2023-10-10 15:03:24 simmim_pretrain](main_simmim.py 218): INFO Train: [25/200][1500/6787] eta 0:22:45 lr 0.000200 time 0.2480 (0.2582) loss 0.3769 (0.3690) grad_norm 412166.6875 (inf) mem 14543MB +[2023-10-10 15:05:33 simmim_pretrain](main_simmim.py 218): INFO Train: [25/200][2000/6787] eta 0:20:35 lr 0.000200 time 0.2608 (0.2581) loss 0.4118 (0.3691) grad_norm 215914.2188 (inf) mem 14543MB +[2023-10-10 15:07:42 simmim_pretrain](main_simmim.py 218): INFO Train: [25/200][2500/6787] eta 0:18:26 lr 0.000200 time 0.2592 (0.2580) loss 0.3598 (0.3696) grad_norm 356511.2188 (inf) mem 14543MB +[2023-10-10 15:09:51 
simmim_pretrain](main_simmim.py 218): INFO Train: [25/200][3000/6787] eta 0:16:17 lr 0.000200 time 0.2543 (0.2580) loss 0.3507 (0.3698) grad_norm 249067.2031 (inf) mem 14543MB +[2023-10-10 15:12:00 simmim_pretrain](main_simmim.py 218): INFO Train: [25/200][3500/6787] eta 0:14:07 lr 0.000200 time 0.2595 (0.2580) loss 0.3824 (0.3700) grad_norm 306355.7500 (inf) mem 14543MB +[2023-10-10 15:14:09 simmim_pretrain](main_simmim.py 218): INFO Train: [25/200][4000/6787] eta 0:11:59 lr 0.000200 time 0.2556 (0.2580) loss 0.3645 (0.3699) grad_norm 264329.4688 (inf) mem 14543MB +[2023-10-10 15:16:18 simmim_pretrain](main_simmim.py 218): INFO Train: [25/200][4500/6787] eta 0:09:50 lr 0.000200 time 0.2596 (0.2581) loss 0.3684 (0.3698) grad_norm 201503.5312 (inf) mem 14543MB +[2023-10-10 15:18:27 simmim_pretrain](main_simmim.py 218): INFO Train: [25/200][5000/6787] eta 0:07:41 lr 0.000200 time 0.2620 (0.2581) loss 0.3564 (0.3697) grad_norm 391118.1875 (inf) mem 14543MB +[2023-10-10 15:20:36 simmim_pretrain](main_simmim.py 218): INFO Train: [25/200][5500/6787] eta 0:05:32 lr 0.000200 time 0.2599 (0.2581) loss 0.3547 (0.3696) grad_norm 544776.5000 (inf) mem 14543MB +[2023-10-10 15:22:45 simmim_pretrain](main_simmim.py 218): INFO Train: [25/200][6000/6787] eta 0:03:23 lr 0.000200 time 0.3232 (0.2581) loss 0.3382 (0.3695) grad_norm 322811.3438 (inf) mem 14543MB +[2023-10-10 15:24:54 simmim_pretrain](main_simmim.py 218): INFO Train: [25/200][6500/6787] eta 0:01:14 lr 0.000200 time 0.2552 (0.2581) loss 0.3623 (0.3695) grad_norm 281809.4062 (inf) mem 14543MB +[2023-10-10 15:26:09 simmim_pretrain](main_simmim.py 228): INFO EPOCH 25 training takes 0:29:12 +[2023-10-10 15:26:10 simmim_pretrain](main_simmim.py 218): INFO Train: [26/200][0/6787] eta 2:32:04 lr 0.000200 time 1.3445 (1.3445) loss 0.3839 (0.3839) grad_norm 391179.9375 (391179.9375) mem 14543MB +[2023-10-10 15:28:20 simmim_pretrain](main_simmim.py 218): INFO Train: [26/200][500/6787] eta 0:27:21 lr 0.000200 time 0.2600 (0.2610) loss 0.3830 (0.3718) grad_norm 160152.8594 (280513.9688) mem 14543MB +[2023-10-10 15:30:29 simmim_pretrain](main_simmim.py 218): INFO Train: [26/200][1000/6787] eta 0:25:01 lr 0.000200 time 0.2572 (0.2595) loss 0.3767 (0.3703) grad_norm 212435.6250 (273766.3438) mem 14543MB +[2023-10-10 15:32:38 simmim_pretrain](main_simmim.py 218): INFO Train: [26/200][1500/6787] eta 0:22:50 lr 0.000200 time 0.2560 (0.2592) loss 0.3584 (0.3704) grad_norm 289258.7188 (inf) mem 14543MB +[2023-10-10 15:34:47 simmim_pretrain](main_simmim.py 218): INFO Train: [26/200][2000/6787] eta 0:20:38 lr 0.000200 time 0.2600 (0.2588) loss 0.3674 (0.3704) grad_norm 219745.4219 (inf) mem 14543MB +[2023-10-10 15:36:56 simmim_pretrain](main_simmim.py 218): INFO Train: [26/200][2500/6787] eta 0:18:29 lr 0.000200 time 0.2580 (0.2587) loss 0.3742 (0.3705) grad_norm 251749.8594 (inf) mem 14543MB +[2023-10-10 15:39:05 simmim_pretrain](main_simmim.py 218): INFO Train: [26/200][3000/6787] eta 0:16:19 lr 0.000200 time 0.2593 (0.2587) loss 0.3725 (0.3704) grad_norm 155780.5312 (inf) mem 14543MB +[2023-10-10 15:41:15 simmim_pretrain](main_simmim.py 218): INFO Train: [26/200][3500/6787] eta 0:14:10 lr 0.000200 time 0.2575 (0.2589) loss 0.3571 (0.3702) grad_norm 587182.5000 (inf) mem 14543MB +[2023-10-10 15:43:25 simmim_pretrain](main_simmim.py 218): INFO Train: [26/200][4000/6787] eta 0:12:01 lr 0.000200 time 0.2541 (0.2588) loss 0.3812 (0.3700) grad_norm 330259.1875 (inf) mem 14543MB +[2023-10-10 15:45:34 simmim_pretrain](main_simmim.py 218): INFO Train: [26/200][4500/6787] 
eta 0:09:51 lr 0.000200 time 0.2606 (0.2587) loss 0.3644 (0.3700) grad_norm 410214.2500 (inf) mem 14543MB +[2023-10-10 15:47:43 simmim_pretrain](main_simmim.py 218): INFO Train: [26/200][5000/6787] eta 0:07:42 lr 0.000200 time 0.2620 (0.2588) loss 0.3703 (0.3698) grad_norm 684560.3750 (inf) mem 14543MB +[2023-10-10 15:49:53 simmim_pretrain](main_simmim.py 218): INFO Train: [26/200][5500/6787] eta 0:05:33 lr 0.000200 time 0.2609 (0.2589) loss 0.3766 (0.3697) grad_norm 433148.9688 (inf) mem 14543MB +[2023-10-10 15:52:03 simmim_pretrain](main_simmim.py 218): INFO Train: [26/200][6000/6787] eta 0:03:23 lr 0.000200 time 0.2597 (0.2589) loss 0.3448 (0.3696) grad_norm 389326.5000 (inf) mem 14543MB +[2023-10-10 15:54:11 simmim_pretrain](main_simmim.py 218): INFO Train: [26/200][6500/6787] eta 0:01:14 lr 0.000200 time 0.2617 (0.2588) loss 0.4199 (0.3753) grad_norm 21360.9102 (inf) mem 14543MB +[2023-10-10 15:55:26 simmim_pretrain](main_simmim.py 228): INFO EPOCH 26 training takes 0:29:16 +[2023-10-10 15:55:27 simmim_pretrain](main_simmim.py 218): INFO Train: [27/200][0/6787] eta 2:22:33 lr 0.000200 time 1.2603 (1.2603) loss 0.3957 (0.3957) grad_norm 37415.8945 (37415.8945) mem 14543MB +[2023-10-10 15:57:36 simmim_pretrain](main_simmim.py 218): INFO Train: [27/200][500/6787] eta 0:27:18 lr 0.000200 time 0.2585 (0.2606) loss 0.3674 (0.3884) grad_norm 23052.9512 (31458.7070) mem 14543MB +[2023-10-10 15:59:46 simmim_pretrain](main_simmim.py 218): INFO Train: [27/200][1000/6787] eta 0:25:05 lr 0.000200 time 0.2540 (0.2601) loss 0.3839 (0.3843) grad_norm 22628.7949 (31893.4414) mem 14543MB +[2023-10-10 16:01:56 simmim_pretrain](main_simmim.py 218): INFO Train: [27/200][1500/6787] eta 0:22:54 lr 0.000200 time 0.2680 (0.2600) loss 0.3809 (0.3815) grad_norm 28995.1387 (32161.4082) mem 14543MB +[2023-10-10 16:04:06 simmim_pretrain](main_simmim.py 218): INFO Train: [27/200][2000/6787] eta 0:20:44 lr 0.000200 time 0.2663 (0.2599) loss 0.3732 (0.3793) grad_norm 55555.1836 (35052.3203) mem 14543MB +[2023-10-10 16:06:15 simmim_pretrain](main_simmim.py 218): INFO Train: [27/200][2500/6787] eta 0:18:33 lr 0.000200 time 0.2539 (0.2597) loss 0.3566 (0.3779) grad_norm 59955.3164 (38509.3945) mem 14543MB +[2023-10-10 16:08:24 simmim_pretrain](main_simmim.py 218): INFO Train: [27/200][3000/6787] eta 0:16:21 lr 0.000200 time 0.2657 (0.2593) loss 0.3595 (0.3769) grad_norm 45988.8633 (41120.0352) mem 14543MB +[2023-10-10 16:10:34 simmim_pretrain](main_simmim.py 218): INFO Train: [27/200][3500/6787] eta 0:14:12 lr 0.000200 time 0.2578 (0.2593) loss 0.3818 (0.3761) grad_norm 49253.2695 (43320.5508) mem 14543MB +[2023-10-10 16:12:43 simmim_pretrain](main_simmim.py 218): INFO Train: [27/200][4000/6787] eta 0:12:02 lr 0.000200 time 0.2587 (0.2593) loss 0.3730 (0.3753) grad_norm 79705.6094 (47119.9258) mem 14543MB +[2023-10-10 16:14:54 simmim_pretrain](main_simmim.py 218): INFO Train: [27/200][4500/6787] eta 0:09:53 lr 0.000200 time 0.2582 (0.2596) loss 0.3524 (0.3746) grad_norm 138937.0781 (52247.7617) mem 14543MB +[2023-10-10 16:17:04 simmim_pretrain](main_simmim.py 218): INFO Train: [27/200][5000/6787] eta 0:07:43 lr 0.000200 time 0.2699 (0.2596) loss 0.3621 (0.3741) grad_norm 96436.4453 (56802.6055) mem 14543MB +[2023-10-10 16:19:13 simmim_pretrain](main_simmim.py 218): INFO Train: [27/200][5500/6787] eta 0:05:33 lr 0.000200 time 0.2577 (0.2595) loss 0.3819 (0.3736) grad_norm 85795.3125 (61281.0195) mem 14543MB +[2023-10-10 16:21:22 simmim_pretrain](main_simmim.py 218): INFO Train: [27/200][6000/6787] eta 0:03:24 lr 0.000200 
time 0.2525 (0.2594) loss 0.3586 (0.3732) grad_norm 61373.0898 (68195.5078) mem 14543MB +[2023-10-10 16:23:32 simmim_pretrain](main_simmim.py 218): INFO Train: [27/200][6500/6787] eta 0:01:14 lr 0.000200 time 0.2507 (0.2593) loss 0.3549 (0.3729) grad_norm 83640.8047 (74236.5156) mem 14543MB +[2023-10-10 16:24:47 simmim_pretrain](main_simmim.py 228): INFO EPOCH 27 training takes 0:29:21 +[2023-10-10 16:24:48 simmim_pretrain](main_simmim.py 218): INFO Train: [28/200][0/6787] eta 2:30:28 lr 0.000200 time 1.3302 (1.3302) loss 0.3544 (0.3544) grad_norm 187873.9531 (187873.9531) mem 14543MB +[2023-10-10 16:26:58 simmim_pretrain](main_simmim.py 218): INFO Train: [28/200][500/6787] eta 0:27:29 lr 0.000200 time 0.3378 (0.2624) loss 0.3951 (0.3669) grad_norm 63378.6602 (174702.1250) mem 14543MB +[2023-10-10 16:29:08 simmim_pretrain](main_simmim.py 218): INFO Train: [28/200][1000/6787] eta 0:25:11 lr 0.000200 time 0.2545 (0.2611) loss 0.3523 (0.3669) grad_norm 201273.6875 (195749.8281) mem 14543MB +[2023-10-10 16:31:17 simmim_pretrain](main_simmim.py 218): INFO Train: [28/200][1500/6787] eta 0:22:54 lr 0.000200 time 0.2578 (0.2601) loss 0.3694 (0.3672) grad_norm 253867.1719 (216915.2969) mem 14543MB +[2023-10-10 16:33:27 simmim_pretrain](main_simmim.py 218): INFO Train: [28/200][2000/6787] eta 0:20:43 lr 0.000200 time 0.2603 (0.2598) loss 0.3662 (0.3672) grad_norm 398629.9062 (268910.2188) mem 14543MB +[2023-10-10 16:35:37 simmim_pretrain](main_simmim.py 218): INFO Train: [28/200][2500/6787] eta 0:18:33 lr 0.000200 time 0.2618 (0.2597) loss 0.3522 (0.3669) grad_norm 674539.5000 (295994.8438) mem 14543MB +[2023-10-10 16:37:47 simmim_pretrain](main_simmim.py 218): INFO Train: [28/200][3000/6787] eta 0:16:23 lr 0.000200 time 0.2590 (0.2597) loss 0.3657 (0.3670) grad_norm 339590.1250 (inf) mem 14543MB +[2023-10-10 16:39:57 simmim_pretrain](main_simmim.py 218): INFO Train: [28/200][3500/6787] eta 0:14:14 lr 0.000200 time 0.2547 (0.2600) loss 0.3941 (0.3670) grad_norm 301137.6875 (inf) mem 14543MB +[2023-10-10 16:42:06 simmim_pretrain](main_simmim.py 218): INFO Train: [28/200][4000/6787] eta 0:12:03 lr 0.000200 time 0.2603 (0.2596) loss 0.3630 (0.3670) grad_norm 308230.6562 (inf) mem 14543MB +[2023-10-10 16:44:15 simmim_pretrain](main_simmim.py 218): INFO Train: [28/200][4500/6787] eta 0:09:53 lr 0.000200 time 0.2593 (0.2594) loss 0.3493 (0.3667) grad_norm 487719.5625 (inf) mem 14543MB +[2023-10-10 16:46:24 simmim_pretrain](main_simmim.py 218): INFO Train: [28/200][5000/6787] eta 0:07:43 lr 0.000200 time 0.2737 (0.2594) loss 0.3537 (0.3667) grad_norm 498045.3438 (inf) mem 14543MB +[2023-10-10 16:48:34 simmim_pretrain](main_simmim.py 218): INFO Train: [28/200][5500/6787] eta 0:05:33 lr 0.000200 time 0.2605 (0.2594) loss 0.3700 (0.3668) grad_norm 321145.5625 (inf) mem 14543MB +[2023-10-10 16:50:44 simmim_pretrain](main_simmim.py 218): INFO Train: [28/200][6000/6787] eta 0:03:24 lr 0.000200 time 0.2558 (0.2594) loss 0.3826 (0.3668) grad_norm 329313.7500 (inf) mem 14543MB +[2023-10-10 16:52:53 simmim_pretrain](main_simmim.py 218): INFO Train: [28/200][6500/6787] eta 0:01:14 lr 0.000200 time 0.2597 (0.2594) loss 0.3861 (0.3669) grad_norm 391732.5938 (inf) mem 14543MB +[2023-10-10 16:54:08 simmim_pretrain](main_simmim.py 228): INFO EPOCH 28 training takes 0:29:20 +[2023-10-10 16:54:09 simmim_pretrain](main_simmim.py 218): INFO Train: [29/200][0/6787] eta 2:32:37 lr 0.000200 time 1.3493 (1.3493) loss 0.3603 (0.3603) grad_norm 315861.4375 (315861.4375) mem 14543MB +[2023-10-10 16:56:18 
simmim_pretrain](main_simmim.py 218): INFO Train: [29/200][500/6787] eta 0:27:14 lr 0.000200 time 0.2595 (0.2600) loss 0.3534 (0.3667) grad_norm 434402.4375 (inf) mem 14543MB +[2023-10-10 16:58:27 simmim_pretrain](main_simmim.py 218): INFO Train: [29/200][1000/6787] eta 0:24:59 lr 0.000200 time 0.2564 (0.2591) loss 0.3626 (0.3665) grad_norm 366216.0312 (inf) mem 14543MB +[2023-10-10 17:00:37 simmim_pretrain](main_simmim.py 218): INFO Train: [29/200][1500/6787] eta 0:22:51 lr 0.000200 time 0.2586 (0.2593) loss 0.3572 (0.3663) grad_norm 355779.8438 (inf) mem 14543MB +[2023-10-10 17:02:47 simmim_pretrain](main_simmim.py 218): INFO Train: [29/200][2000/6787] eta 0:20:43 lr 0.000200 time 0.2625 (0.2598) loss 0.3775 (0.3664) grad_norm 151953.0000 (inf) mem 14543MB +[2023-10-10 17:04:57 simmim_pretrain](main_simmim.py 218): INFO Train: [29/200][2500/6787] eta 0:18:33 lr 0.000200 time 0.2515 (0.2598) loss 0.3604 (0.3667) grad_norm 291492.6875 (inf) mem 14543MB +[2023-10-10 17:07:06 simmim_pretrain](main_simmim.py 218): INFO Train: [29/200][3000/6787] eta 0:16:22 lr 0.000200 time 0.2589 (0.2595) loss 0.3652 (0.3669) grad_norm 173830.3906 (inf) mem 14543MB +[2023-10-10 17:09:16 simmim_pretrain](main_simmim.py 218): INFO Train: [29/200][3500/6787] eta 0:14:13 lr 0.000200 time 0.2566 (0.2596) loss 0.3779 (0.3670) grad_norm 217783.1875 (inf) mem 14543MB +[2023-10-10 17:11:26 simmim_pretrain](main_simmim.py 218): INFO Train: [29/200][4000/6787] eta 0:12:03 lr 0.000200 time 0.2574 (0.2596) loss 0.3743 (0.3672) grad_norm 218028.3438 (inf) mem 14543MB +[2023-10-10 17:13:37 simmim_pretrain](main_simmim.py 218): INFO Train: [29/200][4500/6787] eta 0:09:53 lr 0.000200 time 0.2543 (0.2597) loss 0.3449 (0.3673) grad_norm 306959.4688 (inf) mem 14543MB +[2023-10-10 17:15:46 simmim_pretrain](main_simmim.py 218): INFO Train: [29/200][5000/6787] eta 0:07:44 lr 0.000200 time 0.2528 (0.2597) loss 0.3803 (0.3672) grad_norm 335075.6875 (inf) mem 14543MB +[2023-10-10 17:17:55 simmim_pretrain](main_simmim.py 218): INFO Train: [29/200][5500/6787] eta 0:05:34 lr 0.000200 time 0.2551 (0.2596) loss 0.3613 (0.3672) grad_norm 368315.3438 (inf) mem 14543MB +[2023-10-10 17:20:06 simmim_pretrain](main_simmim.py 218): INFO Train: [29/200][6000/6787] eta 0:03:24 lr 0.000200 time 0.2604 (0.2597) loss 0.3827 (0.3671) grad_norm 491268.8438 (inf) mem 14543MB +[2023-10-10 17:22:16 simmim_pretrain](main_simmim.py 218): INFO Train: [29/200][6500/6787] eta 0:01:14 lr 0.000200 time 0.2598 (0.2597) loss 0.3631 (0.3670) grad_norm 389803.7500 (inf) mem 14543MB +[2023-10-10 17:23:30 simmim_pretrain](main_simmim.py 228): INFO EPOCH 29 training takes 0:29:22 +[2023-10-10 17:23:32 simmim_pretrain](main_simmim.py 218): INFO Train: [30/200][0/6787] eta 2:40:39 lr 0.000200 time 1.4203 (1.4203) loss 0.3706 (0.3706) grad_norm 325290.2188 (325290.2188) mem 14543MB +[2023-10-10 17:25:42 simmim_pretrain](main_simmim.py 218): INFO Train: [30/200][500/6787] eta 0:27:26 lr 0.000200 time 0.2592 (0.2619) loss 0.3641 (0.3672) grad_norm 310988.2812 (293879.4062) mem 14543MB +[2023-10-10 17:27:51 simmim_pretrain](main_simmim.py 218): INFO Train: [30/200][1000/6787] eta 0:25:09 lr 0.000200 time 0.2510 (0.2608) loss 0.3746 (0.3677) grad_norm 199780.5156 (271692.2812) mem 14543MB +[2023-10-10 17:30:01 simmim_pretrain](main_simmim.py 218): INFO Train: [30/200][1500/6787] eta 0:22:54 lr 0.000200 time 0.2555 (0.2601) loss 0.3648 (0.3676) grad_norm 241104.8281 (262166.2812) mem 14543MB +[2023-10-10 17:32:11 simmim_pretrain](main_simmim.py 218): INFO Train: 
[30/200][2000/6787] eta 0:20:44 lr 0.000200 time 0.2571 (0.2600) loss 0.3735 (0.3675) grad_norm 590034.1875 (256445.9219) mem 14543MB +[2023-10-10 17:34:20 simmim_pretrain](main_simmim.py 218): INFO Train: [30/200][2500/6787] eta 0:18:34 lr 0.000200 time 0.2565 (0.2599) loss 0.3631 (0.3675) grad_norm 388314.3750 (263672.1562) mem 14543MB +[2023-10-10 17:36:30 simmim_pretrain](main_simmim.py 218): INFO Train: [30/200][3000/6787] eta 0:16:23 lr 0.000200 time 0.2633 (0.2597) loss 0.3639 (0.3674) grad_norm 335827.6562 (284529.8125) mem 14543MB +[2023-10-10 17:38:38 simmim_pretrain](main_simmim.py 218): INFO Train: [30/200][3500/6787] eta 0:14:12 lr 0.000200 time 0.2581 (0.2593) loss 0.3755 (0.3673) grad_norm 264653.5938 (297412.7188) mem 14543MB +[2023-10-10 17:40:47 simmim_pretrain](main_simmim.py 218): INFO Train: [30/200][4000/6787] eta 0:12:01 lr 0.000200 time 0.2525 (0.2590) loss 0.3548 (0.3672) grad_norm inf (inf) mem 14543MB +[2023-10-10 17:42:56 simmim_pretrain](main_simmim.py 218): INFO Train: [30/200][4500/6787] eta 0:09:52 lr 0.000200 time 0.2599 (0.2589) loss 0.3550 (0.3669) grad_norm 574306.4375 (inf) mem 14543MB +[2023-10-10 17:45:05 simmim_pretrain](main_simmim.py 218): INFO Train: [30/200][5000/6787] eta 0:07:42 lr 0.000200 time 0.2599 (0.2589) loss 0.3664 (0.3668) grad_norm 356478.4688 (inf) mem 14543MB +[2023-10-10 17:47:15 simmim_pretrain](main_simmim.py 218): INFO Train: [30/200][5500/6787] eta 0:05:33 lr 0.000200 time 0.2587 (0.2589) loss 0.3625 (0.3668) grad_norm 197556.9219 (inf) mem 14543MB +[2023-10-10 17:49:24 simmim_pretrain](main_simmim.py 218): INFO Train: [30/200][6000/6787] eta 0:03:23 lr 0.000200 time 0.2586 (0.2589) loss 0.3650 (0.3668) grad_norm 185451.1719 (inf) mem 14543MB +[2023-10-10 17:51:34 simmim_pretrain](main_simmim.py 218): INFO Train: [30/200][6500/6787] eta 0:01:14 lr 0.000200 time 0.2600 (0.2589) loss 0.3594 (0.3668) grad_norm 243726.2812 (inf) mem 14543MB +[2023-10-10 17:52:48 simmim_pretrain](main_simmim.py 228): INFO EPOCH 30 training takes 0:29:18 +[2023-10-10 17:52:50 simmim_pretrain](main_simmim.py 218): INFO Train: [31/200][0/6787] eta 2:47:00 lr 0.000200 time 1.4765 (1.4765) loss 0.3584 (0.3584) grad_norm 279391.9375 (279391.9375) mem 14543MB +[2023-10-10 17:54:55 simmim_pretrain](main_simmim.py 218): INFO Train: [31/200][500/6787] eta 0:26:28 lr 0.000200 time 0.2529 (0.2526) loss 0.3634 (0.3679) grad_norm 199318.0469 (300677.0625) mem 14543MB +[2023-10-10 17:57:01 simmim_pretrain](main_simmim.py 218): INFO Train: [31/200][1000/6787] eta 0:24:17 lr 0.000200 time 0.2514 (0.2518) loss 0.3859 (0.3678) grad_norm 134295.1719 (inf) mem 14543MB +[2023-10-10 17:59:06 simmim_pretrain](main_simmim.py 218): INFO Train: [31/200][1500/6787] eta 0:22:10 lr 0.000200 time 0.2488 (0.2517) loss 0.3656 (0.3686) grad_norm 172415.0156 (inf) mem 14543MB +[2023-10-10 18:01:12 simmim_pretrain](main_simmim.py 218): INFO Train: [31/200][2000/6787] eta 0:20:05 lr 0.000200 time 0.2463 (0.2517) loss 0.3565 (0.3691) grad_norm 109556.2266 (inf) mem 14543MB +[2023-10-10 18:03:18 simmim_pretrain](main_simmim.py 218): INFO Train: [31/200][2500/6787] eta 0:17:58 lr 0.000200 time 0.2582 (0.2515) loss 0.3634 (0.3693) grad_norm 97669.3125 (inf) mem 14543MB +[2023-10-10 18:05:23 simmim_pretrain](main_simmim.py 218): INFO Train: [31/200][3000/6787] eta 0:15:52 lr 0.000200 time 0.2495 (0.2516) loss 0.3709 (0.3691) grad_norm 84036.6562 (inf) mem 14543MB +[2023-10-10 18:07:30 simmim_pretrain](main_simmim.py 218): INFO Train: [31/200][3500/6787] eta 0:13:47 lr 0.000200 time 0.2586 
(0.2517) loss 0.3605 (0.3689) grad_norm 99617.6484 (inf) mem 14543MB +[2023-10-10 18:09:36 simmim_pretrain](main_simmim.py 218): INFO Train: [31/200][4000/6787] eta 0:11:41 lr 0.000200 time 0.2593 (0.2518) loss 0.3570 (0.3687) grad_norm 95499.8047 (inf) mem 14543MB +[2023-10-10 18:11:42 simmim_pretrain](main_simmim.py 218): INFO Train: [31/200][4500/6787] eta 0:09:36 lr 0.000200 time 0.2465 (0.2519) loss 0.3641 (0.3686) grad_norm 235403.1562 (inf) mem 14543MB +[2023-10-10 18:13:48 simmim_pretrain](main_simmim.py 218): INFO Train: [31/200][5000/6787] eta 0:07:30 lr 0.000200 time 0.2507 (0.2520) loss 0.3516 (0.3685) grad_norm 161023.8750 (inf) mem 14543MB +[2023-10-10 18:15:55 simmim_pretrain](main_simmim.py 218): INFO Train: [31/200][5500/6787] eta 0:05:24 lr 0.000200 time 0.2511 (0.2520) loss 0.3751 (0.3683) grad_norm 241905.4688 (inf) mem 14543MB +[2023-10-10 18:18:03 simmim_pretrain](main_simmim.py 218): INFO Train: [31/200][6000/6787] eta 0:03:18 lr 0.000200 time 0.2599 (0.2524) loss 0.3895 (0.3680) grad_norm 471995.2500 (inf) mem 14543MB +[2023-10-10 18:20:12 simmim_pretrain](main_simmim.py 218): INFO Train: [31/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2575 (0.2529) loss 0.3712 (0.3678) grad_norm 501597.9375 (inf) mem 14543MB +[2023-10-10 18:21:27 simmim_pretrain](main_simmim.py 228): INFO EPOCH 31 training takes 0:28:38 +[2023-10-10 18:21:29 simmim_pretrain](main_simmim.py 218): INFO Train: [32/200][0/6787] eta 2:45:23 lr 0.000200 time 1.4621 (1.4621) loss 0.3753 (0.3753) grad_norm 374420.7812 (374420.7812) mem 14543MB +[2023-10-10 18:23:35 simmim_pretrain](main_simmim.py 218): INFO Train: [32/200][500/6787] eta 0:26:45 lr 0.000200 time 0.2468 (0.2554) loss 0.3577 (0.3658) grad_norm 540711.9375 (383967.7188) mem 14543MB +[2023-10-10 18:25:41 simmim_pretrain](main_simmim.py 218): INFO Train: [32/200][1000/6787] eta 0:24:29 lr 0.000200 time 0.2494 (0.2540) loss 0.3798 (0.3660) grad_norm 258010.8750 (inf) mem 14543MB +[2023-10-10 18:27:48 simmim_pretrain](main_simmim.py 218): INFO Train: [32/200][1500/6787] eta 0:22:22 lr 0.000200 time 0.2598 (0.2540) loss 0.3689 (0.3662) grad_norm 159268.8281 (inf) mem 14543MB +[2023-10-10 18:29:58 simmim_pretrain](main_simmim.py 218): INFO Train: [32/200][2000/6787] eta 0:20:20 lr 0.000200 time 0.2572 (0.2551) loss 0.3899 (0.3666) grad_norm 260919.7344 (inf) mem 14543MB +[2023-10-10 18:32:06 simmim_pretrain](main_simmim.py 218): INFO Train: [32/200][2500/6787] eta 0:18:15 lr 0.000200 time 0.2573 (0.2554) loss 0.3765 (0.3669) grad_norm 171259.2188 (inf) mem 14543MB +[2023-10-10 18:34:15 simmim_pretrain](main_simmim.py 218): INFO Train: [32/200][3000/6787] eta 0:16:08 lr 0.000200 time 0.2580 (0.2557) loss 0.3593 (0.3666) grad_norm 201563.5312 (inf) mem 14543MB +[2023-10-10 18:36:23 simmim_pretrain](main_simmim.py 218): INFO Train: [32/200][3500/6787] eta 0:14:01 lr 0.000200 time 0.2596 (0.2559) loss 0.3660 (0.3665) grad_norm 284134.8750 (inf) mem 14543MB +[2023-10-10 18:38:32 simmim_pretrain](main_simmim.py 218): INFO Train: [32/200][4000/6787] eta 0:11:53 lr 0.000200 time 0.2570 (0.2560) loss 0.3641 (0.3666) grad_norm 294874.4688 (inf) mem 14543MB +[2023-10-10 18:40:40 simmim_pretrain](main_simmim.py 218): INFO Train: [32/200][4500/6787] eta 0:09:45 lr 0.000200 time 0.2594 (0.2562) loss 0.3833 (0.3666) grad_norm 265753.1562 (inf) mem 14543MB +[2023-10-10 18:42:49 simmim_pretrain](main_simmim.py 218): INFO Train: [32/200][5000/6787] eta 0:07:37 lr 0.000200 time 0.2547 (0.2562) loss 0.3934 (0.3666) grad_norm 160660.1406 (inf) mem 14543MB 
+[2023-10-10 18:44:57 simmim_pretrain](main_simmim.py 218): INFO Train: [32/200][5500/6787] eta 0:05:29 lr 0.000200 time 0.2572 (0.2563) loss 0.3668 (0.3666) grad_norm 650383.4375 (inf) mem 14543MB +[2023-10-10 18:47:06 simmim_pretrain](main_simmim.py 218): INFO Train: [32/200][6000/6787] eta 0:03:21 lr 0.000200 time 0.2571 (0.2564) loss 0.3618 (0.3666) grad_norm 218859.6562 (inf) mem 14543MB +[2023-10-10 18:49:14 simmim_pretrain](main_simmim.py 218): INFO Train: [32/200][6500/6787] eta 0:01:13 lr 0.000200 time 0.2577 (0.2564) loss 0.3807 (0.3666) grad_norm 311430.9062 (inf) mem 14543MB +[2023-10-10 18:50:28 simmim_pretrain](main_simmim.py 228): INFO EPOCH 32 training takes 0:29:01 +[2023-10-10 18:50:30 simmim_pretrain](main_simmim.py 218): INFO Train: [33/200][0/6787] eta 2:36:37 lr 0.000200 time 1.3846 (1.3846) loss 0.3678 (0.3678) grad_norm 293304.9375 (293304.9375) mem 14543MB +[2023-10-10 18:52:36 simmim_pretrain](main_simmim.py 218): INFO Train: [33/200][500/6787] eta 0:26:34 lr 0.000200 time 0.2477 (0.2536) loss 0.3506 (0.3671) grad_norm 188162.4375 (257625.1719) mem 14543MB +[2023-10-10 18:54:42 simmim_pretrain](main_simmim.py 218): INFO Train: [33/200][1000/6787] eta 0:24:23 lr 0.000200 time 0.2519 (0.2529) loss 0.3647 (0.3667) grad_norm 351517.0625 (262316.6875) mem 14543MB +[2023-10-10 18:56:48 simmim_pretrain](main_simmim.py 218): INFO Train: [33/200][1500/6787] eta 0:22:15 lr 0.000200 time 0.2507 (0.2526) loss 0.3523 (0.3659) grad_norm 408499.0938 (284289.0625) mem 14543MB +[2023-10-10 18:58:54 simmim_pretrain](main_simmim.py 218): INFO Train: [33/200][2000/6787] eta 0:20:08 lr 0.000200 time 0.2541 (0.2524) loss 0.3555 (0.3660) grad_norm 317959.3438 (inf) mem 14543MB +[2023-10-10 19:01:00 simmim_pretrain](main_simmim.py 218): INFO Train: [33/200][2500/6787] eta 0:18:01 lr 0.000200 time 0.2588 (0.2523) loss 0.3766 (0.3663) grad_norm 296599.7812 (inf) mem 14543MB +[2023-10-10 19:03:06 simmim_pretrain](main_simmim.py 218): INFO Train: [33/200][3000/6787] eta 0:15:55 lr 0.000200 time 0.2539 (0.2523) loss 0.3786 (0.3665) grad_norm 280083.1250 (inf) mem 14543MB +[2023-10-10 19:05:12 simmim_pretrain](main_simmim.py 218): INFO Train: [33/200][3500/6787] eta 0:13:49 lr 0.000200 time 0.2516 (0.2524) loss 0.3966 (0.3665) grad_norm 197832.0156 (inf) mem 14543MB +[2023-10-10 19:07:18 simmim_pretrain](main_simmim.py 218): INFO Train: [33/200][4000/6787] eta 0:11:43 lr 0.000200 time 0.2500 (0.2524) loss 0.3745 (0.3666) grad_norm 349727.8125 (inf) mem 14543MB +[2023-10-10 19:09:24 simmim_pretrain](main_simmim.py 218): INFO Train: [33/200][4500/6787] eta 0:09:37 lr 0.000200 time 0.2461 (0.2524) loss 0.3581 (0.3665) grad_norm 202528.2656 (inf) mem 14543MB +[2023-10-10 19:11:30 simmim_pretrain](main_simmim.py 218): INFO Train: [33/200][5000/6787] eta 0:07:30 lr 0.000200 time 0.2526 (0.2523) loss 0.3579 (0.3665) grad_norm 267166.7188 (inf) mem 14543MB +[2023-10-10 19:13:36 simmim_pretrain](main_simmim.py 218): INFO Train: [33/200][5500/6787] eta 0:05:24 lr 0.000200 time 0.2530 (0.2523) loss 0.3607 (0.3666) grad_norm 217122.0938 (inf) mem 14543MB +[2023-10-10 19:15:43 simmim_pretrain](main_simmim.py 218): INFO Train: [33/200][6000/6787] eta 0:03:18 lr 0.000200 time 0.2483 (0.2523) loss 0.3606 (0.3666) grad_norm 111676.1406 (inf) mem 14543MB +[2023-10-10 19:17:49 simmim_pretrain](main_simmim.py 218): INFO Train: [33/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2473 (0.2523) loss 0.3408 (0.3666) grad_norm 366012.4688 (inf) mem 14543MB +[2023-10-10 19:19:02 simmim_pretrain](main_simmim.py 228): 
INFO EPOCH 33 training takes 0:28:33 +[2023-10-10 19:19:03 simmim_pretrain](main_simmim.py 218): INFO Train: [34/200][0/6787] eta 2:42:52 lr 0.000200 time 1.4399 (1.4399) loss 0.3570 (0.3570) grad_norm 304229.9062 (304229.9062) mem 14543MB +[2023-10-10 19:21:09 simmim_pretrain](main_simmim.py 218): INFO Train: [34/200][500/6787] eta 0:26:37 lr 0.000200 time 0.2470 (0.2541) loss 0.3538 (0.3670) grad_norm 203120.2344 (252354.2188) mem 14543MB +[2023-10-10 19:23:16 simmim_pretrain](main_simmim.py 218): INFO Train: [34/200][1000/6787] eta 0:24:30 lr 0.000200 time 0.2552 (0.2541) loss 0.3746 (0.3667) grad_norm 340257.7500 (254896.8750) mem 14543MB +[2023-10-10 19:25:24 simmim_pretrain](main_simmim.py 218): INFO Train: [34/200][1500/6787] eta 0:22:26 lr 0.000200 time 0.2552 (0.2547) loss 0.3709 (0.3661) grad_norm 376421.8438 (274746.2188) mem 14543MB +[2023-10-10 19:27:32 simmim_pretrain](main_simmim.py 218): INFO Train: [34/200][2000/6787] eta 0:20:21 lr 0.000200 time 0.2569 (0.2551) loss 0.3675 (0.3658) grad_norm 192640.7188 (299802.8750) mem 14543MB +[2023-10-10 19:29:40 simmim_pretrain](main_simmim.py 218): INFO Train: [34/200][2500/6787] eta 0:18:14 lr 0.000200 time 0.2553 (0.2554) loss 0.3585 (0.3658) grad_norm 400062.5000 (324304.0938) mem 14543MB +[2023-10-10 19:31:49 simmim_pretrain](main_simmim.py 218): INFO Train: [34/200][3000/6787] eta 0:16:07 lr 0.000200 time 0.2571 (0.2556) loss 0.3634 (0.3656) grad_norm 335525.5312 (inf) mem 14543MB +[2023-10-10 19:33:57 simmim_pretrain](main_simmim.py 218): INFO Train: [34/200][3500/6787] eta 0:14:00 lr 0.000200 time 0.2564 (0.2557) loss 0.3775 (0.3656) grad_norm 247021.4688 (inf) mem 14543MB +[2023-10-10 19:36:05 simmim_pretrain](main_simmim.py 218): INFO Train: [34/200][4000/6787] eta 0:11:52 lr 0.000200 time 0.2551 (0.2557) loss 0.3628 (0.3657) grad_norm 288159.8125 (inf) mem 14543MB +[2023-10-10 19:38:13 simmim_pretrain](main_simmim.py 218): INFO Train: [34/200][4500/6787] eta 0:09:45 lr 0.000200 time 0.2565 (0.2558) loss 0.3805 (0.3658) grad_norm 452970.0000 (inf) mem 14543MB +[2023-10-10 19:40:21 simmim_pretrain](main_simmim.py 218): INFO Train: [34/200][5000/6787] eta 0:07:36 lr 0.000200 time 0.2546 (0.2557) loss 0.3581 (0.3660) grad_norm 311581.0938 (inf) mem 14543MB +[2023-10-10 19:42:27 simmim_pretrain](main_simmim.py 218): INFO Train: [34/200][5500/6787] eta 0:05:28 lr 0.000200 time 0.2464 (0.2554) loss 0.3735 (0.3661) grad_norm 183112.2500 (inf) mem 14543MB +[2023-10-10 19:44:32 simmim_pretrain](main_simmim.py 218): INFO Train: [34/200][6000/6787] eta 0:03:20 lr 0.000200 time 0.2492 (0.2551) loss 0.3578 (0.3661) grad_norm 746149.3125 (inf) mem 14543MB +[2023-10-10 19:46:38 simmim_pretrain](main_simmim.py 218): INFO Train: [34/200][6500/6787] eta 0:01:13 lr 0.000200 time 0.2530 (0.2548) loss 0.3643 (0.3660) grad_norm 397526.0000 (inf) mem 14543MB +[2023-10-10 19:47:51 simmim_pretrain](main_simmim.py 228): INFO EPOCH 34 training takes 0:28:49 +[2023-10-10 19:47:52 simmim_pretrain](main_simmim.py 218): INFO Train: [35/200][0/6787] eta 2:27:51 lr 0.000200 time 1.3071 (1.3071) loss 0.3896 (0.3896) grad_norm 319609.0000 (319609.0000) mem 14543MB +[2023-10-10 19:49:58 simmim_pretrain](main_simmim.py 218): INFO Train: [35/200][500/6787] eta 0:26:37 lr 0.000200 time 0.2490 (0.2542) loss 0.3565 (0.3664) grad_norm 272074.5625 (inf) mem 14543MB +[2023-10-10 19:52:05 simmim_pretrain](main_simmim.py 218): INFO Train: [35/200][1000/6787] eta 0:24:24 lr 0.000200 time 0.2475 (0.2532) loss 0.3687 (0.3664) grad_norm 385989.0312 (inf) mem 14543MB 
+[2023-10-10 19:54:10 simmim_pretrain](main_simmim.py 218): INFO Train: [35/200][1500/6787] eta 0:22:16 lr 0.000200 time 0.2487 (0.2527) loss 0.3684 (0.3666) grad_norm 203559.9531 (inf) mem 14543MB +[2023-10-10 19:56:16 simmim_pretrain](main_simmim.py 218): INFO Train: [35/200][2000/6787] eta 0:20:08 lr 0.000200 time 0.2504 (0.2525) loss 0.3785 (0.3672) grad_norm 112302.0938 (inf) mem 14543MB +[2023-10-10 19:58:22 simmim_pretrain](main_simmim.py 218): INFO Train: [35/200][2500/6787] eta 0:18:01 lr 0.000200 time 0.2531 (0.2524) loss 0.3476 (0.3675) grad_norm 174679.9375 (inf) mem 14543MB +[2023-10-10 20:00:28 simmim_pretrain](main_simmim.py 218): INFO Train: [35/200][3000/6787] eta 0:15:55 lr 0.000200 time 0.2521 (0.2523) loss 0.3589 (0.3677) grad_norm 112545.1719 (inf) mem 14543MB +[2023-10-10 20:02:35 simmim_pretrain](main_simmim.py 218): INFO Train: [35/200][3500/6787] eta 0:13:50 lr 0.000200 time 0.2543 (0.2525) loss 0.3740 (0.3676) grad_norm 273287.6875 (inf) mem 14543MB +[2023-10-10 20:04:43 simmim_pretrain](main_simmim.py 218): INFO Train: [35/200][4000/6787] eta 0:11:44 lr 0.000200 time 0.2547 (0.2529) loss 0.3826 (0.3676) grad_norm 156023.8750 (inf) mem 14543MB +[2023-10-10 20:06:49 simmim_pretrain](main_simmim.py 218): INFO Train: [35/200][4500/6787] eta 0:09:38 lr 0.000200 time 0.2516 (0.2529) loss 0.3852 (0.3674) grad_norm 281602.5312 (inf) mem 14543MB +[2023-10-10 20:08:55 simmim_pretrain](main_simmim.py 218): INFO Train: [35/200][5000/6787] eta 0:07:31 lr 0.000200 time 0.2497 (0.2528) loss 0.3557 (0.3673) grad_norm 237661.0781 (inf) mem 14543MB +[2023-10-10 20:11:03 simmim_pretrain](main_simmim.py 218): INFO Train: [35/200][5500/6787] eta 0:05:25 lr 0.000200 time 0.2606 (0.2530) loss 0.3621 (0.3673) grad_norm 337653.1875 (inf) mem 14543MB +[2023-10-10 20:13:13 simmim_pretrain](main_simmim.py 218): INFO Train: [35/200][6000/6787] eta 0:03:19 lr 0.000200 time 0.2552 (0.2536) loss 0.3739 (0.3672) grad_norm 305393.1875 (inf) mem 14543MB +[2023-10-10 20:15:23 simmim_pretrain](main_simmim.py 218): INFO Train: [35/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2607 (0.2541) loss 0.3521 (0.3670) grad_norm 284520.5312 (inf) mem 14543MB +[2023-10-10 20:16:38 simmim_pretrain](main_simmim.py 228): INFO EPOCH 35 training takes 0:28:46 +[2023-10-10 20:16:39 simmim_pretrain](main_simmim.py 218): INFO Train: [36/200][0/6787] eta 2:40:34 lr 0.000200 time 1.4196 (1.4196) loss 0.3630 (0.3630) grad_norm 215011.3750 (215011.3750) mem 14543MB +[2023-10-10 20:18:45 simmim_pretrain](main_simmim.py 218): INFO Train: [36/200][500/6787] eta 0:26:34 lr 0.000200 time 0.2491 (0.2536) loss 0.3545 (0.3661) grad_norm 241011.5156 (inf) mem 14543MB +[2023-10-10 20:20:51 simmim_pretrain](main_simmim.py 218): INFO Train: [36/200][1000/6787] eta 0:24:22 lr 0.000200 time 0.2551 (0.2527) loss 0.3205 (0.3655) grad_norm 138857.3750 (inf) mem 14543MB +[2023-10-10 20:22:57 simmim_pretrain](main_simmim.py 218): INFO Train: [36/200][1500/6787] eta 0:22:16 lr 0.000200 time 0.2472 (0.2527) loss 0.3753 (0.3659) grad_norm 314728.5938 (inf) mem 14543MB +[2023-10-10 20:25:04 simmim_pretrain](main_simmim.py 218): INFO Train: [36/200][2000/6787] eta 0:20:09 lr 0.000200 time 0.2474 (0.2527) loss 0.3612 (0.3660) grad_norm 286663.6875 (inf) mem 14543MB +[2023-10-10 20:27:11 simmim_pretrain](main_simmim.py 218): INFO Train: [36/200][2500/6787] eta 0:18:05 lr 0.000200 time 0.2558 (0.2532) loss 0.3782 (0.3661) grad_norm 289186.4688 (inf) mem 14543MB +[2023-10-10 20:29:19 simmim_pretrain](main_simmim.py 218): INFO Train: 
[36/200][3000/6787] eta 0:16:00 lr 0.000200 time 0.2545 (0.2536) loss 0.3788 (0.3662) grad_norm 309308.5312 (inf) mem 14543MB +[2023-10-10 20:31:26 simmim_pretrain](main_simmim.py 218): INFO Train: [36/200][3500/6787] eta 0:13:53 lr 0.000200 time 0.2589 (0.2537) loss 0.3573 (0.3663) grad_norm 225424.7969 (inf) mem 14543MB +[2023-10-10 20:33:34 simmim_pretrain](main_simmim.py 218): INFO Train: [36/200][4000/6787] eta 0:11:47 lr 0.000200 time 0.2502 (0.2538) loss 0.3606 (0.3662) grad_norm 141248.1406 (inf) mem 14543MB +[2023-10-10 20:35:41 simmim_pretrain](main_simmim.py 218): INFO Train: [36/200][4500/6787] eta 0:09:40 lr 0.000200 time 0.2538 (0.2538) loss 0.3410 (0.3661) grad_norm 297924.9062 (inf) mem 14543MB +[2023-10-10 20:37:47 simmim_pretrain](main_simmim.py 218): INFO Train: [36/200][5000/6787] eta 0:07:33 lr 0.000200 time 0.2535 (0.2538) loss 0.3575 (0.3660) grad_norm 246211.4062 (inf) mem 14543MB +[2023-10-10 20:39:54 simmim_pretrain](main_simmim.py 218): INFO Train: [36/200][5500/6787] eta 0:05:26 lr 0.000200 time 0.2536 (0.2538) loss 0.3660 (0.3659) grad_norm 340184.7812 (inf) mem 14543MB +[2023-10-10 20:42:00 simmim_pretrain](main_simmim.py 218): INFO Train: [36/200][6000/6787] eta 0:03:19 lr 0.000200 time 0.2514 (0.2537) loss 0.3714 (0.3660) grad_norm 321946.5312 (inf) mem 14543MB +[2023-10-10 20:44:06 simmim_pretrain](main_simmim.py 218): INFO Train: [36/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2518 (0.2536) loss 0.3493 (0.3660) grad_norm 235837.4688 (inf) mem 14543MB +[2023-10-10 20:45:19 simmim_pretrain](main_simmim.py 228): INFO EPOCH 36 training takes 0:28:41 +[2023-10-10 20:45:21 simmim_pretrain](main_simmim.py 218): INFO Train: [37/200][0/6787] eta 2:26:49 lr 0.000200 time 1.2980 (1.2980) loss 0.3625 (0.3625) grad_norm 334360.1875 (334360.1875) mem 14543MB +[2023-10-10 20:47:24 simmim_pretrain](main_simmim.py 218): INFO Train: [37/200][500/6787] eta 0:26:10 lr 0.000200 time 0.2461 (0.2498) loss 0.3499 (0.3651) grad_norm 274292.8750 (293740.8125) mem 14543MB +[2023-10-10 20:49:28 simmim_pretrain](main_simmim.py 218): INFO Train: [37/200][1000/6787] eta 0:24:00 lr 0.000200 time 0.2491 (0.2489) loss 0.3947 (0.3654) grad_norm 603381.3750 (327743.5938) mem 14543MB +[2023-10-10 20:51:32 simmim_pretrain](main_simmim.py 218): INFO Train: [37/200][1500/6787] eta 0:21:54 lr 0.000200 time 0.2461 (0.2486) loss 0.3687 (0.3651) grad_norm 321693.0625 (402229.9062) mem 14543MB +[2023-10-10 20:53:37 simmim_pretrain](main_simmim.py 218): INFO Train: [37/200][2000/6787] eta 0:19:49 lr 0.000200 time 0.2478 (0.2485) loss 0.3550 (0.3653) grad_norm 385785.1875 (inf) mem 14543MB +[2023-10-10 20:55:41 simmim_pretrain](main_simmim.py 218): INFO Train: [37/200][2500/6787] eta 0:17:45 lr 0.000200 time 0.2461 (0.2485) loss 0.3841 (0.3652) grad_norm 198693.8906 (inf) mem 14543MB +[2023-10-10 20:57:45 simmim_pretrain](main_simmim.py 218): INFO Train: [37/200][3000/6787] eta 0:15:40 lr 0.000200 time 0.2455 (0.2485) loss 0.3653 (0.3654) grad_norm 251116.9375 (inf) mem 14543MB +[2023-10-10 20:59:49 simmim_pretrain](main_simmim.py 218): INFO Train: [37/200][3500/6787] eta 0:13:36 lr 0.000200 time 0.2531 (0.2484) loss 0.3858 (0.3655) grad_norm 269586.3438 (inf) mem 14543MB +[2023-10-10 21:01:53 simmim_pretrain](main_simmim.py 218): INFO Train: [37/200][4000/6787] eta 0:11:32 lr 0.000200 time 0.2461 (0.2483) loss 0.3715 (0.3656) grad_norm 498564.3750 (inf) mem 14543MB +[2023-10-10 21:03:57 simmim_pretrain](main_simmim.py 218): INFO Train: [37/200][4500/6787] eta 0:09:27 lr 0.000200 time 0.2569 
(0.2483) loss 0.3783 (0.3656) grad_norm 189048.2500 (inf) mem 14543MB +[2023-10-10 21:06:01 simmim_pretrain](main_simmim.py 218): INFO Train: [37/200][5000/6787] eta 0:07:23 lr 0.000200 time 0.2458 (0.2483) loss 0.3606 (0.3657) grad_norm 152714.5312 (inf) mem 14543MB +[2023-10-10 21:08:05 simmim_pretrain](main_simmim.py 218): INFO Train: [37/200][5500/6787] eta 0:05:19 lr 0.000200 time 0.2464 (0.2483) loss 0.3796 (0.3657) grad_norm 240211.0469 (inf) mem 14543MB +[2023-10-10 21:10:09 simmim_pretrain](main_simmim.py 218): INFO Train: [37/200][6000/6787] eta 0:03:15 lr 0.000200 time 0.2494 (0.2482) loss 0.3738 (0.3657) grad_norm 311883.7188 (inf) mem 14543MB +[2023-10-10 21:12:13 simmim_pretrain](main_simmim.py 218): INFO Train: [37/200][6500/6787] eta 0:01:11 lr 0.000200 time 0.2462 (0.2482) loss 0.3732 (0.3656) grad_norm 403084.1875 (inf) mem 14543MB +[2023-10-10 21:13:25 simmim_pretrain](main_simmim.py 228): INFO EPOCH 37 training takes 0:28:05 +[2023-10-10 21:13:26 simmim_pretrain](main_simmim.py 218): INFO Train: [38/200][0/6787] eta 2:27:03 lr 0.000200 time 1.3001 (1.3001) loss 0.3839 (0.3839) grad_norm 450857.7500 (450857.7500) mem 14543MB +[2023-10-10 21:15:30 simmim_pretrain](main_simmim.py 218): INFO Train: [38/200][500/6787] eta 0:26:12 lr 0.000200 time 0.2457 (0.2501) loss 0.3687 (0.3645) grad_norm 208406.9688 (443340.9375) mem 14543MB +[2023-10-10 21:17:34 simmim_pretrain](main_simmim.py 218): INFO Train: [38/200][1000/6787] eta 0:24:01 lr 0.000200 time 0.2446 (0.2492) loss 0.3754 (0.3651) grad_norm 265558.0312 (inf) mem 14543MB +[2023-10-10 21:19:38 simmim_pretrain](main_simmim.py 218): INFO Train: [38/200][1500/6787] eta 0:21:55 lr 0.000200 time 0.2470 (0.2488) loss 0.3650 (0.3652) grad_norm 279909.9375 (inf) mem 14543MB +[2023-10-10 21:21:42 simmim_pretrain](main_simmim.py 218): INFO Train: [38/200][2000/6787] eta 0:19:50 lr 0.000200 time 0.2539 (0.2486) loss 0.3613 (0.3653) grad_norm 202835.1875 (inf) mem 14543MB +[2023-10-10 21:23:46 simmim_pretrain](main_simmim.py 218): INFO Train: [38/200][2500/6787] eta 0:17:45 lr 0.000200 time 0.2447 (0.2485) loss 0.3575 (0.3654) grad_norm 235722.7188 (inf) mem 14543MB +[2023-10-10 21:25:50 simmim_pretrain](main_simmim.py 218): INFO Train: [38/200][3000/6787] eta 0:15:40 lr 0.000200 time 0.2490 (0.2484) loss 0.3805 (0.3654) grad_norm 307796.7812 (inf) mem 14543MB +[2023-10-10 21:27:54 simmim_pretrain](main_simmim.py 218): INFO Train: [38/200][3500/6787] eta 0:13:36 lr 0.000200 time 0.2456 (0.2483) loss 0.3769 (0.3652) grad_norm 564839.4375 (inf) mem 14543MB +[2023-10-10 21:29:58 simmim_pretrain](main_simmim.py 218): INFO Train: [38/200][4000/6787] eta 0:11:31 lr 0.000200 time 0.2486 (0.2482) loss 0.3585 (0.3651) grad_norm 532107.1250 (inf) mem 14543MB +[2023-10-10 21:32:02 simmim_pretrain](main_simmim.py 218): INFO Train: [38/200][4500/6787] eta 0:09:27 lr 0.000200 time 0.2483 (0.2482) loss 0.3684 (0.3650) grad_norm 534331.6250 (inf) mem 14543MB +[2023-10-10 21:34:06 simmim_pretrain](main_simmim.py 218): INFO Train: [38/200][5000/6787] eta 0:07:23 lr 0.000200 time 0.2515 (0.2482) loss 0.3707 (0.3651) grad_norm 435952.1562 (inf) mem 14543MB +[2023-10-10 21:36:10 simmim_pretrain](main_simmim.py 218): INFO Train: [38/200][5500/6787] eta 0:05:19 lr 0.000200 time 0.2495 (0.2482) loss 0.3884 (0.3651) grad_norm 253426.7812 (inf) mem 14543MB +[2023-10-10 21:38:14 simmim_pretrain](main_simmim.py 218): INFO Train: [38/200][6000/6787] eta 0:03:15 lr 0.000200 time 0.2473 (0.2482) loss 0.3581 (0.3652) grad_norm 226109.5469 (inf) mem 14543MB 
+[2023-10-10 21:40:18 simmim_pretrain](main_simmim.py 218): INFO Train: [38/200][6500/6787] eta 0:01:11 lr 0.000200 time 0.2467 (0.2482) loss 0.3604 (0.3653) grad_norm 631440.5000 (inf) mem 14543MB +[2023-10-10 21:41:30 simmim_pretrain](main_simmim.py 228): INFO EPOCH 38 training takes 0:28:05 +[2023-10-10 21:41:32 simmim_pretrain](main_simmim.py 218): INFO Train: [39/200][0/6787] eta 2:36:16 lr 0.000200 time 1.3816 (1.3816) loss 0.3679 (0.3679) grad_norm 706728.5625 (706728.5625) mem 14543MB +[2023-10-10 21:43:36 simmim_pretrain](main_simmim.py 218): INFO Train: [39/200][500/6787] eta 0:26:13 lr 0.000200 time 0.2490 (0.2503) loss 0.4073 (0.3653) grad_norm 350115.0312 (398354.1875) mem 14543MB +[2023-10-10 21:45:40 simmim_pretrain](main_simmim.py 218): INFO Train: [39/200][1000/6787] eta 0:24:02 lr 0.000200 time 0.2507 (0.2493) loss 0.3873 (0.3652) grad_norm 270967.1875 (414624.0625) mem 14543MB +[2023-10-10 21:47:44 simmim_pretrain](main_simmim.py 218): INFO Train: [39/200][1500/6787] eta 0:21:56 lr 0.000200 time 0.2464 (0.2490) loss 0.3938 (0.3650) grad_norm 643002.1875 (443122.7500) mem 14543MB +[2023-10-10 21:49:48 simmim_pretrain](main_simmim.py 218): INFO Train: [39/200][2000/6787] eta 0:19:50 lr 0.000200 time 0.2453 (0.2488) loss 0.4011 (0.3903) grad_norm 41909.7773 (inf) mem 14543MB +[2023-10-10 21:51:52 simmim_pretrain](main_simmim.py 218): INFO Train: [39/200][2500/6787] eta 0:17:46 lr 0.000200 time 0.2504 (0.2487) loss 0.4061 (0.3891) grad_norm 35817.0938 (inf) mem 14543MB +[2023-10-10 21:53:56 simmim_pretrain](main_simmim.py 218): INFO Train: [39/200][3000/6787] eta 0:15:41 lr 0.000200 time 0.2506 (0.2487) loss 0.3861 (0.3866) grad_norm 29815.2129 (inf) mem 14543MB +[2023-10-10 21:56:01 simmim_pretrain](main_simmim.py 218): INFO Train: [39/200][3500/6787] eta 0:13:37 lr 0.000200 time 0.2464 (0.2486) loss 0.3990 (0.3845) grad_norm 34887.1523 (inf) mem 14543MB +[2023-10-10 21:58:05 simmim_pretrain](main_simmim.py 218): INFO Train: [39/200][4000/6787] eta 0:11:32 lr 0.000200 time 0.2461 (0.2486) loss 0.3736 (0.3826) grad_norm 44092.2148 (inf) mem 14543MB +[2023-10-10 22:00:09 simmim_pretrain](main_simmim.py 218): INFO Train: [39/200][4500/6787] eta 0:09:28 lr 0.000200 time 0.2479 (0.2486) loss 0.3700 (0.3811) grad_norm 100508.8438 (inf) mem 14543MB +[2023-10-10 22:02:13 simmim_pretrain](main_simmim.py 218): INFO Train: [39/200][5000/6787] eta 0:07:24 lr 0.000200 time 0.2494 (0.2485) loss 0.3529 (0.3799) grad_norm 71744.2188 (inf) mem 14543MB +[2023-10-10 22:04:17 simmim_pretrain](main_simmim.py 218): INFO Train: [39/200][5500/6787] eta 0:05:19 lr 0.000200 time 0.2467 (0.2485) loss 0.4027 (0.3788) grad_norm 78603.0625 (inf) mem 14543MB +[2023-10-10 22:06:22 simmim_pretrain](main_simmim.py 218): INFO Train: [39/200][6000/6787] eta 0:03:15 lr 0.000200 time 0.2452 (0.2485) loss 0.3646 (0.3778) grad_norm 118329.6953 (inf) mem 14543MB +[2023-10-10 22:08:26 simmim_pretrain](main_simmim.py 218): INFO Train: [39/200][6500/6787] eta 0:01:11 lr 0.000200 time 0.2496 (0.2485) loss 0.3442 (0.3769) grad_norm 115296.9219 (inf) mem 14543MB +[2023-10-10 22:09:37 simmim_pretrain](main_simmim.py 228): INFO EPOCH 39 training takes 0:28:07 +[2023-10-10 22:09:39 simmim_pretrain](main_simmim.py 218): INFO Train: [40/200][0/6787] eta 2:33:26 lr 0.000200 time 1.3565 (1.3565) loss 0.3461 (0.3461) grad_norm 119867.4453 (119867.4453) mem 14543MB +[2023-10-10 22:11:43 simmim_pretrain](main_simmim.py 218): INFO Train: [40/200][500/6787] eta 0:26:13 lr 0.000200 time 0.2498 (0.2503) loss 0.3801 (0.3651) grad_norm 
93892.8125 (92858.3359) mem 14543MB +[2023-10-10 22:13:47 simmim_pretrain](main_simmim.py 218): INFO Train: [40/200][1000/6787] eta 0:24:02 lr 0.000200 time 0.2456 (0.2492) loss 0.3660 (0.3657) grad_norm 83616.7109 (104966.8594) mem 14543MB +[2023-10-10 22:15:51 simmim_pretrain](main_simmim.py 218): INFO Train: [40/200][1500/6787] eta 0:21:55 lr 0.000200 time 0.2535 (0.2488) loss 0.3530 (0.3654) grad_norm 86830.6875 (122288.4844) mem 14543MB +[2023-10-10 22:17:55 simmim_pretrain](main_simmim.py 218): INFO Train: [40/200][2000/6787] eta 0:19:50 lr 0.000200 time 0.2460 (0.2487) loss 0.3633 (0.3653) grad_norm 252444.9375 (134365.3750) mem 14543MB +[2023-10-10 22:19:59 simmim_pretrain](main_simmim.py 218): INFO Train: [40/200][2500/6787] eta 0:17:45 lr 0.000200 time 0.2468 (0.2486) loss 0.3802 (0.3653) grad_norm 129132.5625 (146398.0625) mem 14543MB +[2023-10-10 22:22:03 simmim_pretrain](main_simmim.py 218): INFO Train: [40/200][3000/6787] eta 0:15:41 lr 0.000200 time 0.2509 (0.2485) loss 0.3658 (0.3652) grad_norm 243458.6875 (156810.0781) mem 14543MB +[2023-10-10 22:24:07 simmim_pretrain](main_simmim.py 218): INFO Train: [40/200][3500/6787] eta 0:13:36 lr 0.000200 time 0.2470 (0.2484) loss 0.3649 (0.3651) grad_norm 162436.7656 (181904.1094) mem 14543MB +[2023-10-10 22:26:11 simmim_pretrain](main_simmim.py 218): INFO Train: [40/200][4000/6787] eta 0:11:32 lr 0.000200 time 0.2473 (0.2483) loss 0.3626 (0.3649) grad_norm 283710.0312 (194689.8906) mem 14543MB +[2023-10-10 22:28:15 simmim_pretrain](main_simmim.py 218): INFO Train: [40/200][4500/6787] eta 0:09:27 lr 0.000200 time 0.2489 (0.2483) loss 0.3599 (0.3648) grad_norm 224440.3594 (219000.1719) mem 14543MB +[2023-10-10 22:30:19 simmim_pretrain](main_simmim.py 218): INFO Train: [40/200][5000/6787] eta 0:07:23 lr 0.000200 time 0.2485 (0.2483) loss 0.3666 (0.3647) grad_norm 160116.5156 (inf) mem 14543MB +[2023-10-10 22:32:23 simmim_pretrain](main_simmim.py 218): INFO Train: [40/200][5500/6787] eta 0:05:19 lr 0.000200 time 0.2471 (0.2483) loss 0.3772 (0.3647) grad_norm 87426.3594 (inf) mem 14543MB +[2023-10-10 22:34:27 simmim_pretrain](main_simmim.py 218): INFO Train: [40/200][6000/6787] eta 0:03:15 lr 0.000200 time 0.2485 (0.2483) loss 0.3623 (0.3647) grad_norm 110052.3672 (inf) mem 14543MB +[2023-10-10 22:36:32 simmim_pretrain](main_simmim.py 218): INFO Train: [40/200][6500/6787] eta 0:01:11 lr 0.000200 time 0.2465 (0.2483) loss 0.3623 (0.3647) grad_norm 254506.9219 (inf) mem 14543MB +[2023-10-10 22:37:44 simmim_pretrain](main_simmim.py 228): INFO EPOCH 40 training takes 0:28:06 +[2023-10-10 22:37:44 simmim_pretrain](utils.py 62): INFO /root/autodl-tmp/LSQ-simmim/checkpoint/simmim_pretrain/vit_simmim/ckpt_epoch_40.pth saving...... +[2023-10-10 22:37:44 simmim_pretrain](utils.py 64): INFO /root/autodl-tmp/LSQ-simmim/checkpoint/simmim_pretrain/vit_simmim/ckpt_epoch_40.pth saved !!! 
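Because every training entry in this log follows the single fixed pattern quoted above, the loss and gradient-norm curves buried in it can be recovered mechanically. Below is a minimal illustrative sketch, not part of the repository: it assumes the log is saved as nohup.out with one entry per line (the run-together wrapping in this dump is an artifact), and the file name, PAT, and parse_train_log are invented for the example.

import re

# One entry looks like:
# [2023-10-10 12:16:38 simmim_pretrain](main_simmim.py 218): INFO Train: [19/200][3000/6787]
#   eta 0:15:54 lr 0.000194 time 0.2559 (0.2521) loss 0.3821 (0.3723) grad_norm 341284.0938 (inf) mem 14543MB
PAT = re.compile(
    r"INFO Train: \[(\d+)/\d+\]\[(\d+)/\d+\].*?"
    r"loss (\S+) \((\S+)\) grad_norm \S+ \((\S+)\)"
)

def parse_train_log(path="nohup.out"):
    """Collect (epoch, iteration, loss, avg_loss, avg_grad_norm) tuples from the log."""
    records = []
    with open(path) as f:
        for line in f:
            m = PAT.search(line)
            if m:
                epoch, iteration = int(m.group(1)), int(m.group(2))
                loss, avg_loss = float(m.group(3)), float(m.group(4))
                avg_grad = float(m.group(5))  # float("inf") parses the overflowed averages
                records.append((epoch, iteration, loss, avg_loss, avg_grad))
    return records

if __name__ == "__main__":
    recs = parse_train_log()
    overflowed = sum(1 for r in recs if r[4] == float("inf"))
    print(f"{overflowed}/{len(recs)} entries report an inf running grad_norm average")

Plotting avg_loss per epoch from these tuples makes the two instability episodes noted above (epochs 26-27 and epoch 39) stand out immediately.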
+[2023-10-10 22:37:46 simmim_pretrain](main_simmim.py 218): INFO Train: [41/200][0/6787] eta 2:31:36 lr 0.000200 time 1.3403 (1.3403) loss 0.3592 (0.3592) grad_norm 335095.7500 (335095.7500) mem 14543MB +[2023-10-10 22:39:50 simmim_pretrain](main_simmim.py 218): INFO Train: [41/200][500/6787] eta 0:26:12 lr 0.000200 time 0.2459 (0.2502) loss 0.3690 (0.3645) grad_norm 261329.5156 (292073.2812) mem 14543MB +[2023-10-10 22:41:54 simmim_pretrain](main_simmim.py 218): INFO Train: [41/200][1000/6787] eta 0:24:03 lr 0.000200 time 0.2466 (0.2494) loss 0.3745 (0.3642) grad_norm 457198.6875 (310417.1875) mem 14543MB +[2023-10-10 22:43:58 simmim_pretrain](main_simmim.py 218): INFO Train: [41/200][1500/6787] eta 0:21:56 lr 0.000200 time 0.2476 (0.2490) loss 0.3607 (0.3638) grad_norm 236496.8594 (361824.3438) mem 14543MB +[2023-10-10 22:46:02 simmim_pretrain](main_simmim.py 218): INFO Train: [41/200][2000/6787] eta 0:19:51 lr 0.000200 time 0.2517 (0.2489) loss 0.3647 (0.3639) grad_norm 450821.4688 (inf) mem 14543MB +[2023-10-10 22:48:06 simmim_pretrain](main_simmim.py 218): INFO Train: [41/200][2500/6787] eta 0:17:46 lr 0.000200 time 0.2515 (0.2488) loss 0.3587 (0.3637) grad_norm 173073.8750 (inf) mem 14543MB +[2023-10-10 22:50:11 simmim_pretrain](main_simmim.py 218): INFO Train: [41/200][3000/6787] eta 0:15:41 lr 0.000200 time 0.2472 (0.2487) loss 0.3792 (0.3637) grad_norm 447053.1250 (inf) mem 14543MB +[2023-10-10 22:52:15 simmim_pretrain](main_simmim.py 218): INFO Train: [41/200][3500/6787] eta 0:13:37 lr 0.000200 time 0.2475 (0.2487) loss 0.3682 (0.3640) grad_norm 194206.7031 (inf) mem 14543MB +[2023-10-10 22:54:19 simmim_pretrain](main_simmim.py 218): INFO Train: [41/200][4000/6787] eta 0:11:33 lr 0.000200 time 0.2503 (0.2487) loss 0.3821 (0.3642) grad_norm 220258.4531 (inf) mem 14543MB +[2023-10-10 22:56:23 simmim_pretrain](main_simmim.py 218): INFO Train: [41/200][4500/6787] eta 0:09:28 lr 0.000200 time 0.2464 (0.2486) loss 0.3655 (0.3643) grad_norm 280326.7500 (inf) mem 14543MB +[2023-10-10 22:58:28 simmim_pretrain](main_simmim.py 218): INFO Train: [41/200][5000/6787] eta 0:07:24 lr 0.000200 time 0.2526 (0.2486) loss 0.3416 (0.3643) grad_norm 149814.2031 (inf) mem 14543MB +[2023-10-10 23:00:32 simmim_pretrain](main_simmim.py 218): INFO Train: [41/200][5500/6787] eta 0:05:19 lr 0.000200 time 0.2469 (0.2486) loss 0.3415 (0.3644) grad_norm 160095.1875 (inf) mem 14543MB +[2023-10-10 23:02:36 simmim_pretrain](main_simmim.py 218): INFO Train: [41/200][6000/6787] eta 0:03:15 lr 0.000200 time 0.2446 (0.2486) loss 0.3627 (0.3643) grad_norm 310503.7188 (inf) mem 14543MB +[2023-10-10 23:04:40 simmim_pretrain](main_simmim.py 218): INFO Train: [41/200][6500/6787] eta 0:01:11 lr 0.000200 time 0.2496 (0.2486) loss 0.3556 (0.3642) grad_norm 791738.8750 (inf) mem 14543MB +[2023-10-10 23:05:52 simmim_pretrain](main_simmim.py 228): INFO EPOCH 41 training takes 0:28:07 +[2023-10-10 23:05:53 simmim_pretrain](main_simmim.py 218): INFO Train: [42/200][0/6787] eta 2:32:37 lr 0.000200 time 1.3493 (1.3493) loss 0.3763 (0.3763) grad_norm 356450.0312 (356450.0312) mem 14543MB +[2023-10-10 23:07:57 simmim_pretrain](main_simmim.py 218): INFO Train: [42/200][500/6787] eta 0:26:11 lr 0.000200 time 0.2583 (0.2500) loss 0.3892 (0.3627) grad_norm 429443.6562 (inf) mem 14543MB +[2023-10-10 23:10:01 simmim_pretrain](main_simmim.py 218): INFO Train: [42/200][1000/6787] eta 0:24:00 lr 0.000200 time 0.2465 (0.2489) loss 0.3640 (0.3629) grad_norm 678288.6250 (inf) mem 14543MB +[2023-10-10 23:12:05 simmim_pretrain](main_simmim.py 218): 
INFO Train: [42/200][1500/6787] eta 0:21:54 lr 0.000200 time 0.2488 (0.2486) loss 0.3653 (0.3633) grad_norm 333081.0312 (inf) mem 14543MB +[2023-10-10 23:14:09 simmim_pretrain](main_simmim.py 218): INFO Train: [42/200][2000/6787] eta 0:19:48 lr 0.000200 time 0.2463 (0.2483) loss 0.3777 (0.3634) grad_norm 360545.2500 (inf) mem 14543MB +[2023-10-10 23:16:13 simmim_pretrain](main_simmim.py 218): INFO Train: [42/200][2500/6787] eta 0:17:44 lr 0.000200 time 0.2475 (0.2482) loss 0.3528 (0.3634) grad_norm 552183.4375 (inf) mem 14543MB +[2023-10-10 23:18:16 simmim_pretrain](main_simmim.py 218): INFO Train: [42/200][3000/6787] eta 0:15:39 lr 0.000200 time 0.2463 (0.2481) loss 0.3798 (0.3634) grad_norm 300832.6875 (inf) mem 14543MB +[2023-10-10 23:20:20 simmim_pretrain](main_simmim.py 218): INFO Train: [42/200][3500/6787] eta 0:13:35 lr 0.000200 time 0.2454 (0.2480) loss 0.3712 (0.3635) grad_norm 233827.8594 (inf) mem 14543MB +[2023-10-10 23:22:24 simmim_pretrain](main_simmim.py 218): INFO Train: [42/200][4000/6787] eta 0:11:31 lr 0.000200 time 0.2486 (0.2480) loss 0.3717 (0.3636) grad_norm 394343.6875 (inf) mem 14543MB +[2023-10-10 23:24:28 simmim_pretrain](main_simmim.py 218): INFO Train: [42/200][4500/6787] eta 0:09:27 lr 0.000200 time 0.2584 (0.2479) loss 0.3503 (0.3637) grad_norm 317775.4688 (inf) mem 14543MB +[2023-10-10 23:26:32 simmim_pretrain](main_simmim.py 218): INFO Train: [42/200][5000/6787] eta 0:07:23 lr 0.000200 time 0.2459 (0.2479) loss 0.3850 (0.3638) grad_norm 239926.7656 (inf) mem 14543MB +[2023-10-10 23:28:36 simmim_pretrain](main_simmim.py 218): INFO Train: [42/200][5500/6787] eta 0:05:19 lr 0.000200 time 0.2522 (0.2479) loss 0.3518 (0.3638) grad_norm 160476.3125 (inf) mem 14543MB +[2023-10-10 23:30:40 simmim_pretrain](main_simmim.py 218): INFO Train: [42/200][6000/6787] eta 0:03:15 lr 0.000200 time 0.2462 (0.2479) loss 0.3756 (0.3638) grad_norm 210586.4531 (inf) mem 14543MB +[2023-10-10 23:32:44 simmim_pretrain](main_simmim.py 218): INFO Train: [42/200][6500/6787] eta 0:01:11 lr 0.000200 time 0.2484 (0.2479) loss 0.3865 (0.3638) grad_norm 663934.7500 (inf) mem 14543MB +[2023-10-10 23:33:55 simmim_pretrain](main_simmim.py 228): INFO EPOCH 42 training takes 0:28:03 +[2023-10-10 23:33:57 simmim_pretrain](main_simmim.py 218): INFO Train: [43/200][0/6787] eta 2:17:25 lr 0.000200 time 1.2148 (1.2148) loss 0.3362 (0.3362) grad_norm 664057.2500 (664057.2500) mem 14543MB +[2023-10-10 23:36:01 simmim_pretrain](main_simmim.py 218): INFO Train: [43/200][500/6787] eta 0:26:11 lr 0.000200 time 0.2461 (0.2500) loss 0.3644 (0.3627) grad_norm 370556.8750 (inf) mem 14543MB +[2023-10-10 23:38:05 simmim_pretrain](main_simmim.py 218): INFO Train: [43/200][1000/6787] eta 0:24:01 lr 0.000200 time 0.2528 (0.2491) loss 0.3590 (0.3631) grad_norm 758542.1250 (inf) mem 14543MB +[2023-10-10 23:40:09 simmim_pretrain](main_simmim.py 218): INFO Train: [43/200][1500/6787] eta 0:21:55 lr 0.000200 time 0.2461 (0.2488) loss 0.3608 (0.3637) grad_norm 274564.7812 (inf) mem 14543MB +[2023-10-10 23:42:13 simmim_pretrain](main_simmim.py 218): INFO Train: [43/200][2000/6787] eta 0:19:50 lr 0.000200 time 0.2473 (0.2487) loss 0.3464 (0.3639) grad_norm 267989.1250 (inf) mem 14543MB +[2023-10-10 23:44:17 simmim_pretrain](main_simmim.py 218): INFO Train: [43/200][2500/6787] eta 0:17:45 lr 0.000200 time 0.2460 (0.2486) loss 0.3591 (0.3643) grad_norm 283347.2188 (inf) mem 14543MB +[2023-10-10 23:46:21 simmim_pretrain](main_simmim.py 218): INFO Train: [43/200][3000/6787] eta 0:15:41 lr 0.000200 time 0.2521 (0.2486) loss 
0.3408 (0.3643) grad_norm 279111.9688 (inf) mem 14543MB +[2023-10-10 23:48:25 simmim_pretrain](main_simmim.py 218): INFO Train: [43/200][3500/6787] eta 0:13:36 lr 0.000200 time 0.2460 (0.2485) loss 0.3596 (0.3643) grad_norm 640211.0625 (inf) mem 14543MB +[2023-10-10 23:50:30 simmim_pretrain](main_simmim.py 218): INFO Train: [43/200][4000/6787] eta 0:11:32 lr 0.000200 time 0.2504 (0.2485) loss 0.3527 (0.3642) grad_norm 488216.4688 (inf) mem 14543MB +[2023-10-10 23:52:34 simmim_pretrain](main_simmim.py 218): INFO Train: [43/200][4500/6787] eta 0:09:28 lr 0.000200 time 0.2482 (0.2485) loss 0.3690 (0.3639) grad_norm 217386.0469 (inf) mem 14543MB +[2023-10-10 23:54:38 simmim_pretrain](main_simmim.py 218): INFO Train: [43/200][5000/6787] eta 0:07:24 lr 0.000200 time 0.2494 (0.2485) loss 0.3559 (0.3640) grad_norm 340602.0625 (inf) mem 14543MB +[2023-10-10 23:56:42 simmim_pretrain](main_simmim.py 218): INFO Train: [43/200][5500/6787] eta 0:05:19 lr 0.000200 time 0.2445 (0.2485) loss 0.3807 (0.3639) grad_norm 232049.5000 (inf) mem 14543MB +[2023-10-10 23:58:46 simmim_pretrain](main_simmim.py 218): INFO Train: [43/200][6000/6787] eta 0:03:15 lr 0.000200 time 0.2447 (0.2485) loss 0.3534 (0.3639) grad_norm 516933.0312 (inf) mem 14543MB +[2023-10-11 00:00:51 simmim_pretrain](main_simmim.py 218): INFO Train: [43/200][6500/6787] eta 0:01:11 lr 0.000200 time 0.2486 (0.2484) loss 0.3595 (0.3639) grad_norm 367178.9062 (inf) mem 14543MB +[2023-10-11 00:02:02 simmim_pretrain](main_simmim.py 228): INFO EPOCH 43 training takes 0:28:06 +[2023-10-11 00:02:04 simmim_pretrain](main_simmim.py 218): INFO Train: [44/200][0/6787] eta 2:32:22 lr 0.000200 time 1.3471 (1.3471) loss 0.3786 (0.3786) grad_norm 544773.5625 (544773.5625) mem 14543MB +[2023-10-11 00:04:07 simmim_pretrain](main_simmim.py 218): INFO Train: [44/200][500/6787] eta 0:26:10 lr 0.000200 time 0.2483 (0.2498) loss 0.3803 (0.3625) grad_norm 584269.8125 (inf) mem 14543MB +[2023-10-11 00:06:11 simmim_pretrain](main_simmim.py 218): INFO Train: [44/200][1000/6787] eta 0:24:00 lr 0.000200 time 0.2501 (0.2489) loss 0.3747 (0.3633) grad_norm 776864.4375 (inf) mem 14543MB +[2023-10-11 00:08:15 simmim_pretrain](main_simmim.py 218): INFO Train: [44/200][1500/6787] eta 0:21:54 lr 0.000200 time 0.2456 (0.2486) loss 0.3549 (0.3631) grad_norm 564549.8125 (inf) mem 14543MB +[2023-10-11 00:10:19 simmim_pretrain](main_simmim.py 218): INFO Train: [44/200][2000/6787] eta 0:19:49 lr 0.000200 time 0.2467 (0.2484) loss 0.3726 (0.3633) grad_norm 294429.7500 (inf) mem 14543MB +[2023-10-11 00:12:23 simmim_pretrain](main_simmim.py 218): INFO Train: [44/200][2500/6787] eta 0:17:44 lr 0.000200 time 0.2458 (0.2483) loss 0.3813 (0.3635) grad_norm 451391.1875 (inf) mem 14543MB +[2023-10-11 00:14:27 simmim_pretrain](main_simmim.py 218): INFO Train: [44/200][3000/6787] eta 0:15:40 lr 0.000200 time 0.2465 (0.2483) loss 0.3803 (0.3638) grad_norm 440069.8750 (inf) mem 14543MB +[2023-10-11 00:16:31 simmim_pretrain](main_simmim.py 218): INFO Train: [44/200][3500/6787] eta 0:13:35 lr 0.000200 time 0.2465 (0.2482) loss 0.3627 (0.3639) grad_norm 181651.0938 (inf) mem 14543MB +[2023-10-11 00:18:35 simmim_pretrain](main_simmim.py 218): INFO Train: [44/200][4000/6787] eta 0:11:31 lr 0.000200 time 0.2459 (0.2482) loss 0.3704 (0.3641) grad_norm 510534.1875 (inf) mem 14543MB +[2023-10-11 00:20:39 simmim_pretrain](main_simmim.py 218): INFO Train: [44/200][4500/6787] eta 0:09:27 lr 0.000200 time 0.2484 (0.2482) loss 0.3561 (0.3640) grad_norm 203330.4219 (inf) mem 14543MB +[2023-10-11 00:22:43 
simmim_pretrain](main_simmim.py 218): INFO Train: [44/200][5000/6787] eta 0:07:23 lr 0.000200 time 0.2472 (0.2481) loss 0.3775 (0.3640) grad_norm 382957.3750 (inf) mem 14543MB +[2023-10-11 00:24:47 simmim_pretrain](main_simmim.py 218): INFO Train: [44/200][5500/6787] eta 0:05:19 lr 0.000200 time 0.2517 (0.2481) loss 0.3520 (0.3641) grad_norm 276487.5312 (inf) mem 14543MB +[2023-10-11 00:26:51 simmim_pretrain](main_simmim.py 218): INFO Train: [44/200][6000/6787] eta 0:03:15 lr 0.000200 time 0.2484 (0.2481) loss 0.3767 (0.3642) grad_norm 248337.1406 (inf) mem 14543MB +[2023-10-11 00:28:55 simmim_pretrain](main_simmim.py 218): INFO Train: [44/200][6500/6787] eta 0:01:11 lr 0.000200 time 0.2452 (0.2481) loss 0.3934 (0.3642) grad_norm 320632.6562 (inf) mem 14543MB +[2023-10-11 00:30:07 simmim_pretrain](main_simmim.py 228): INFO EPOCH 44 training takes 0:28:04 +[2023-10-11 00:30:08 simmim_pretrain](main_simmim.py 218): INFO Train: [45/200][0/6787] eta 2:57:11 lr 0.000200 time 1.5664 (1.5664) loss 0.3878 (0.3878) grad_norm 163496.7344 (163496.7344) mem 14543MB +[2023-10-11 00:32:13 simmim_pretrain](main_simmim.py 218): INFO Train: [45/200][500/6787] eta 0:26:18 lr 0.000200 time 0.2454 (0.2510) loss 0.3620 (0.3642) grad_norm 379163.0625 (387397.6250) mem 14543MB +[2023-10-11 00:34:17 simmim_pretrain](main_simmim.py 218): INFO Train: [45/200][1000/6787] eta 0:24:07 lr 0.000200 time 0.2478 (0.2502) loss 0.3446 (0.3633) grad_norm 1049138.6250 (387211.0312) mem 14543MB +[2023-10-11 00:36:22 simmim_pretrain](main_simmim.py 218): INFO Train: [45/200][1500/6787] eta 0:22:01 lr 0.000200 time 0.2451 (0.2500) loss 0.3507 (0.3638) grad_norm 250728.8125 (inf) mem 14543MB +[2023-10-11 00:38:27 simmim_pretrain](main_simmim.py 218): INFO Train: [45/200][2000/6787] eta 0:19:55 lr 0.000200 time 0.2516 (0.2498) loss 0.3703 (0.3640) grad_norm 176143.6406 (inf) mem 14543MB +[2023-10-11 00:40:32 simmim_pretrain](main_simmim.py 218): INFO Train: [45/200][2500/6787] eta 0:17:50 lr 0.000200 time 0.2489 (0.2498) loss 0.3568 (0.3643) grad_norm 308656.3125 (inf) mem 14543MB +[2023-10-11 00:42:36 simmim_pretrain](main_simmim.py 218): INFO Train: [45/200][3000/6787] eta 0:15:45 lr 0.000200 time 0.2470 (0.2498) loss 0.3668 (0.3644) grad_norm 361113.4062 (inf) mem 14543MB +[2023-10-11 00:44:41 simmim_pretrain](main_simmim.py 218): INFO Train: [45/200][3500/6787] eta 0:13:41 lr 0.000200 time 0.2505 (0.2498) loss 0.3555 (0.3645) grad_norm 256804.1250 (inf) mem 14543MB +[2023-10-11 00:46:47 simmim_pretrain](main_simmim.py 218): INFO Train: [45/200][4000/6787] eta 0:11:36 lr 0.000200 time 0.2594 (0.2499) loss 0.3612 (0.3645) grad_norm 216762.2500 (inf) mem 14543MB +[2023-10-11 00:48:52 simmim_pretrain](main_simmim.py 218): INFO Train: [45/200][4500/6787] eta 0:09:31 lr 0.000200 time 0.2455 (0.2499) loss 0.3551 (0.3645) grad_norm 350893.2500 (inf) mem 14543MB +[2023-10-11 00:50:56 simmim_pretrain](main_simmim.py 218): INFO Train: [45/200][5000/6787] eta 0:07:26 lr 0.000200 time 0.2585 (0.2499) loss 0.3717 (0.3645) grad_norm 427980.7500 (inf) mem 14543MB +[2023-10-11 00:53:01 simmim_pretrain](main_simmim.py 218): INFO Train: [45/200][5500/6787] eta 0:05:21 lr 0.000200 time 0.2516 (0.2498) loss 0.3373 (0.3645) grad_norm 338574.9375 (inf) mem 14543MB +[2023-10-11 00:55:06 simmim_pretrain](main_simmim.py 218): INFO Train: [45/200][6000/6787] eta 0:03:16 lr 0.000200 time 0.2522 (0.2499) loss 0.3605 (0.3643) grad_norm 401816.5938 (inf) mem 14543MB +[2023-10-11 00:57:11 simmim_pretrain](main_simmim.py 218): INFO Train: [45/200][6500/6787] 
eta 0:01:11 lr 0.000200 time 0.2586 (0.2499) loss 0.3600 (0.3643) grad_norm 627936.7500 (inf) mem 14543MB +[2023-10-11 00:58:23 simmim_pretrain](main_simmim.py 228): INFO EPOCH 45 training takes 0:28:16 +[2023-10-11 00:58:25 simmim_pretrain](main_simmim.py 218): INFO Train: [46/200][0/6787] eta 2:41:29 lr 0.000200 time 1.4277 (1.4277) loss 0.3555 (0.3555) grad_norm 489117.3125 (489117.3125) mem 14543MB +[2023-10-11 01:00:30 simmim_pretrain](main_simmim.py 218): INFO Train: [46/200][500/6787] eta 0:26:30 lr 0.000200 time 0.2555 (0.2529) loss 0.3367 (0.3634) grad_norm 723940.0625 (inf) mem 14543MB +[2023-10-11 01:02:36 simmim_pretrain](main_simmim.py 218): INFO Train: [46/200][1000/6787] eta 0:24:20 lr 0.000200 time 0.2505 (0.2523) loss 0.3571 (0.3634) grad_norm 697757.7500 (inf) mem 14543MB +[2023-10-11 01:04:42 simmim_pretrain](main_simmim.py 218): INFO Train: [46/200][1500/6787] eta 0:22:13 lr 0.000200 time 0.2523 (0.2522) loss 0.3778 (0.3630) grad_norm 548565.5000 (inf) mem 14543MB +[2023-10-11 01:06:48 simmim_pretrain](main_simmim.py 218): INFO Train: [46/200][2000/6787] eta 0:20:06 lr 0.000200 time 0.2525 (0.2521) loss 0.3641 (0.3631) grad_norm 478167.4062 (inf) mem 14543MB +[2023-10-11 01:08:54 simmim_pretrain](main_simmim.py 218): INFO Train: [46/200][2500/6787] eta 0:18:00 lr 0.000200 time 0.2524 (0.2520) loss 0.3573 (0.3627) grad_norm 481278.5938 (inf) mem 14543MB +[2023-10-11 01:11:00 simmim_pretrain](main_simmim.py 218): INFO Train: [46/200][3000/6787] eta 0:15:54 lr 0.000200 time 0.2554 (0.2520) loss 0.3284 (0.3628) grad_norm 598187.1250 (inf) mem 14543MB +[2023-10-11 01:13:06 simmim_pretrain](main_simmim.py 218): INFO Train: [46/200][3500/6787] eta 0:13:48 lr 0.000200 time 0.2514 (0.2519) loss 0.3649 (0.3627) grad_norm 466964.3125 (inf) mem 14543MB +[2023-10-11 01:15:11 simmim_pretrain](main_simmim.py 218): INFO Train: [46/200][4000/6787] eta 0:11:42 lr 0.000200 time 0.2506 (0.2519) loss 0.3532 (0.3629) grad_norm 341306.7812 (inf) mem 14543MB +[2023-10-11 01:17:17 simmim_pretrain](main_simmim.py 218): INFO Train: [46/200][4500/6787] eta 0:09:35 lr 0.000200 time 0.2500 (0.2518) loss 0.5133 (0.3670) grad_norm 8192.9541 (inf) mem 14543MB +[2023-10-11 01:19:23 simmim_pretrain](main_simmim.py 218): INFO Train: [46/200][5000/6787] eta 0:07:29 lr 0.000200 time 0.2492 (0.2518) loss 0.3916 (0.3736) grad_norm 35077.4961 (inf) mem 14543MB +[2023-10-11 01:21:28 simmim_pretrain](main_simmim.py 218): INFO Train: [46/200][5500/6787] eta 0:05:23 lr 0.000200 time 0.2439 (0.2517) loss 0.3743 (0.3739) grad_norm 31794.4277 (inf) mem 14543MB +[2023-10-11 01:23:34 simmim_pretrain](main_simmim.py 218): INFO Train: [46/200][6000/6787] eta 0:03:18 lr 0.000200 time 0.2500 (0.2517) loss 0.3786 (0.3737) grad_norm 40885.3125 (inf) mem 14543MB +[2023-10-11 01:25:39 simmim_pretrain](main_simmim.py 218): INFO Train: [46/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2510 (0.2516) loss 0.3573 (0.3736) grad_norm 29708.0996 (inf) mem 14543MB +[2023-10-11 01:26:52 simmim_pretrain](main_simmim.py 228): INFO EPOCH 46 training takes 0:28:28 +[2023-10-11 01:26:53 simmim_pretrain](main_simmim.py 218): INFO Train: [47/200][0/6787] eta 2:56:28 lr 0.000200 time 1.5602 (1.5602) loss 0.3808 (0.3808) grad_norm 27461.1562 (27461.1562) mem 14543MB +[2023-10-11 01:28:58 simmim_pretrain](main_simmim.py 218): INFO Train: [47/200][500/6787] eta 0:26:22 lr 0.000200 time 0.2512 (0.2517) loss 0.3734 (0.3680) grad_norm 69219.5391 (48537.2734) mem 14543MB +[2023-10-11 01:31:03 simmim_pretrain](main_simmim.py 218): INFO Train: 
[47/200][1000/6787] eta 0:24:10 lr 0.000200 time 0.2467 (0.2507) loss 0.3630 (0.3677) grad_norm 50439.2930 (50515.4961) mem 14543MB +[2023-10-11 01:33:08 simmim_pretrain](main_simmim.py 218): INFO Train: [47/200][1500/6787] eta 0:22:03 lr 0.000200 time 0.2496 (0.2503) loss 0.3663 (0.3677) grad_norm 78190.7188 (51518.5156) mem 14543MB +[2023-10-11 01:35:12 simmim_pretrain](main_simmim.py 218): INFO Train: [47/200][2000/6787] eta 0:19:57 lr 0.000200 time 0.2471 (0.2501) loss 0.3632 (0.3675) grad_norm 59504.5820 (55988.7344) mem 14543MB +[2023-10-11 01:37:17 simmim_pretrain](main_simmim.py 218): INFO Train: [47/200][2500/6787] eta 0:17:52 lr 0.000200 time 0.2480 (0.2501) loss 0.3664 (0.3670) grad_norm 82650.3438 (62042.2109) mem 14543MB +[2023-10-11 01:39:22 simmim_pretrain](main_simmim.py 218): INFO Train: [47/200][3000/6787] eta 0:15:46 lr 0.000200 time 0.2589 (0.2500) loss 0.3406 (0.3667) grad_norm 113726.7969 (71312.3906) mem 14543MB +[2023-10-11 01:41:27 simmim_pretrain](main_simmim.py 218): INFO Train: [47/200][3500/6787] eta 0:13:41 lr 0.000200 time 0.2461 (0.2500) loss 0.3812 (0.3665) grad_norm 113979.7656 (75201.1094) mem 14543MB +[2023-10-11 01:43:32 simmim_pretrain](main_simmim.py 218): INFO Train: [47/200][4000/6787] eta 0:11:36 lr 0.000200 time 0.2463 (0.2500) loss 0.3696 (0.3662) grad_norm 273631.8750 (85130.3906) mem 14543MB +[2023-10-11 01:45:37 simmim_pretrain](main_simmim.py 218): INFO Train: [47/200][4500/6787] eta 0:09:31 lr 0.000200 time 0.2534 (0.2500) loss 0.3499 (0.3660) grad_norm 114111.0938 (94094.6016) mem 14543MB +[2023-10-11 01:47:42 simmim_pretrain](main_simmim.py 218): INFO Train: [47/200][5000/6787] eta 0:07:26 lr 0.000200 time 0.2548 (0.2500) loss 0.3617 (0.3658) grad_norm 345186.6562 (102983.7734) mem 14543MB +[2023-10-11 01:49:47 simmim_pretrain](main_simmim.py 218): INFO Train: [47/200][5500/6787] eta 0:05:21 lr 0.000200 time 0.2465 (0.2499) loss 0.3850 (0.3657) grad_norm 280478.8125 (111281.3984) mem 14543MB +[2023-10-11 01:51:52 simmim_pretrain](main_simmim.py 218): INFO Train: [47/200][6000/6787] eta 0:03:16 lr 0.000200 time 0.2547 (0.2499) loss 0.3660 (0.3654) grad_norm 279369.4688 (122865.2031) mem 14543MB +[2023-10-11 01:53:56 simmim_pretrain](main_simmim.py 218): INFO Train: [47/200][6500/6787] eta 0:01:11 lr 0.000200 time 0.2494 (0.2499) loss 0.3787 (0.3652) grad_norm 465164.7812 (144529.6719) mem 14543MB +[2023-10-11 01:55:09 simmim_pretrain](main_simmim.py 228): INFO EPOCH 47 training takes 0:28:16 +[2023-10-11 01:55:10 simmim_pretrain](main_simmim.py 218): INFO Train: [48/200][0/6787] eta 2:55:42 lr 0.000200 time 1.5534 (1.5534) loss 0.3367 (0.3367) grad_norm 270580.6875 (270580.6875) mem 14543MB +[2023-10-11 01:57:15 simmim_pretrain](main_simmim.py 218): INFO Train: [48/200][500/6787] eta 0:26:24 lr 0.000200 time 0.2488 (0.2520) loss 0.3529 (0.3638) grad_norm 119517.2656 (233062.7812) mem 14543MB +[2023-10-11 01:59:20 simmim_pretrain](main_simmim.py 218): INFO Train: [48/200][1000/6787] eta 0:24:11 lr 0.000200 time 0.2472 (0.2507) loss 0.3352 (0.3642) grad_norm 247407.5938 (225968.4844) mem 14543MB +[2023-10-11 02:01:24 simmim_pretrain](main_simmim.py 218): INFO Train: [48/200][1500/6787] eta 0:22:03 lr 0.000200 time 0.2517 (0.2504) loss 0.3599 (0.3641) grad_norm 218541.5938 (223707.5938) mem 14543MB +[2023-10-11 02:03:29 simmim_pretrain](main_simmim.py 218): INFO Train: [48/200][2000/6787] eta 0:19:57 lr 0.000200 time 0.2473 (0.2501) loss 0.3621 (0.3640) grad_norm 179888.3750 (225409.7344) mem 14543MB +[2023-10-11 02:05:34 
simmim_pretrain](main_simmim.py 218): INFO Train: [48/200][2500/6787] eta 0:17:51 lr 0.000200 time 0.2461 (0.2499) loss 0.3708 (0.3637) grad_norm 216551.4531 (245487.7969) mem 14543MB +[2023-10-11 02:07:39 simmim_pretrain](main_simmim.py 218): INFO Train: [48/200][3000/6787] eta 0:15:46 lr 0.000200 time 0.2525 (0.2499) loss 0.3801 (0.3637) grad_norm 178786.7500 (inf) mem 14543MB +[2023-10-11 02:09:43 simmim_pretrain](main_simmim.py 218): INFO Train: [48/200][3500/6787] eta 0:13:41 lr 0.000200 time 0.2508 (0.2498) loss 0.3703 (0.3636) grad_norm 157914.2812 (inf) mem 14543MB +[2023-10-11 02:11:48 simmim_pretrain](main_simmim.py 218): INFO Train: [48/200][4000/6787] eta 0:11:36 lr 0.000200 time 0.2535 (0.2498) loss 0.3606 (0.3637) grad_norm 310359.0938 (inf) mem 14543MB +[2023-10-11 02:13:53 simmim_pretrain](main_simmim.py 218): INFO Train: [48/200][4500/6787] eta 0:09:31 lr 0.000200 time 0.2473 (0.2497) loss 0.3440 (0.3637) grad_norm 220890.3125 (inf) mem 14543MB +[2023-10-11 02:15:57 simmim_pretrain](main_simmim.py 218): INFO Train: [48/200][5000/6787] eta 0:07:26 lr 0.000200 time 0.2489 (0.2497) loss 0.3589 (0.3637) grad_norm 276843.0625 (inf) mem 14543MB +[2023-10-11 02:18:02 simmim_pretrain](main_simmim.py 218): INFO Train: [48/200][5500/6787] eta 0:05:21 lr 0.000200 time 0.2513 (0.2497) loss 0.3608 (0.3636) grad_norm 451188.0000 (inf) mem 14543MB +[2023-10-11 02:20:07 simmim_pretrain](main_simmim.py 218): INFO Train: [48/200][6000/6787] eta 0:03:16 lr 0.000200 time 0.2519 (0.2497) loss 0.3770 (0.3635) grad_norm 232435.2344 (inf) mem 14543MB +[2023-10-11 02:22:12 simmim_pretrain](main_simmim.py 218): INFO Train: [48/200][6500/6787] eta 0:01:11 lr 0.000200 time 0.2554 (0.2497) loss 0.3626 (0.3636) grad_norm 345978.5312 (inf) mem 14543MB +[2023-10-11 02:23:24 simmim_pretrain](main_simmim.py 228): INFO EPOCH 48 training takes 0:28:15 +[2023-10-11 02:23:25 simmim_pretrain](main_simmim.py 218): INFO Train: [49/200][0/6787] eta 2:57:03 lr 0.000200 time 1.5652 (1.5652) loss 0.3558 (0.3558) grad_norm 467449.9375 (467449.9375) mem 14543MB +[2023-10-11 02:25:30 simmim_pretrain](main_simmim.py 218): INFO Train: [49/200][500/6787] eta 0:26:22 lr 0.000200 time 0.2486 (0.2518) loss 0.3571 (0.3626) grad_norm 97589.9141 (inf) mem 14543MB +[2023-10-11 02:27:35 simmim_pretrain](main_simmim.py 218): INFO Train: [49/200][1000/6787] eta 0:24:10 lr 0.000200 time 0.2478 (0.2507) loss 0.3597 (0.3634) grad_norm 234085.4688 (inf) mem 14543MB +[2023-10-11 02:29:40 simmim_pretrain](main_simmim.py 218): INFO Train: [49/200][1500/6787] eta 0:22:03 lr 0.000200 time 0.2478 (0.2504) loss 0.3727 (0.3637) grad_norm 209717.7812 (inf) mem 14543MB +[2023-10-11 02:31:45 simmim_pretrain](main_simmim.py 218): INFO Train: [49/200][2000/6787] eta 0:19:58 lr 0.000200 time 0.2473 (0.2503) loss 0.3841 (0.3640) grad_norm 149170.3125 (inf) mem 14543MB +[2023-10-11 02:33:50 simmim_pretrain](main_simmim.py 218): INFO Train: [49/200][2500/6787] eta 0:17:52 lr 0.000200 time 0.2508 (0.2502) loss 0.3671 (0.3640) grad_norm 304377.5938 (inf) mem 14543MB +[2023-10-11 02:35:54 simmim_pretrain](main_simmim.py 218): INFO Train: [49/200][3000/6787] eta 0:15:47 lr 0.000200 time 0.2500 (0.2501) loss 0.3893 (0.3637) grad_norm 431888.3438 (inf) mem 14543MB +[2023-10-11 02:37:59 simmim_pretrain](main_simmim.py 218): INFO Train: [49/200][3500/6787] eta 0:13:41 lr 0.000200 time 0.2553 (0.2501) loss 0.3362 (0.3633) grad_norm 448759.0625 (inf) mem 14543MB +[2023-10-11 02:40:04 simmim_pretrain](main_simmim.py 218): INFO Train: [49/200][4000/6787] eta 
0:11:36 lr 0.000200 time 0.2502 (0.2500) loss 0.3391 (0.3632) grad_norm 308764.1250 (inf) mem 14543MB +[2023-10-11 02:42:09 simmim_pretrain](main_simmim.py 218): INFO Train: [49/200][4500/6787] eta 0:09:31 lr 0.000200 time 0.2470 (0.2499) loss 0.3638 (0.3632) grad_norm 407801.9062 (inf) mem 14543MB +[2023-10-11 02:44:14 simmim_pretrain](main_simmim.py 218): INFO Train: [49/200][5000/6787] eta 0:07:26 lr 0.000200 time 0.2511 (0.2499) loss 0.3914 (0.3631) grad_norm 403336.8750 (inf) mem 14543MB +[2023-10-11 02:46:19 simmim_pretrain](main_simmim.py 218): INFO Train: [49/200][5500/6787] eta 0:05:21 lr 0.000200 time 0.2473 (0.2499) loss 0.3771 (0.3630) grad_norm 286049.4688 (inf) mem 14543MB +[2023-10-11 02:48:24 simmim_pretrain](main_simmim.py 218): INFO Train: [49/200][6000/6787] eta 0:03:16 lr 0.000200 time 0.2479 (0.2499) loss 0.3578 (0.3630) grad_norm 222491.9062 (inf) mem 14543MB +[2023-10-11 02:50:29 simmim_pretrain](main_simmim.py 218): INFO Train: [49/200][6500/6787] eta 0:01:11 lr 0.000200 time 0.2478 (0.2499) loss 0.3450 (0.3631) grad_norm 95039.0859 (inf) mem 14543MB +[2023-10-11 02:51:41 simmim_pretrain](main_simmim.py 228): INFO EPOCH 49 training takes 0:28:17 +[2023-10-11 02:51:42 simmim_pretrain](main_simmim.py 218): INFO Train: [50/200][0/6787] eta 2:40:53 lr 0.000200 time 1.4224 (1.4224) loss 0.3475 (0.3475) grad_norm 280531.1562 (280531.1562) mem 14543MB +[2023-10-11 02:53:47 simmim_pretrain](main_simmim.py 218): INFO Train: [50/200][500/6787] eta 0:26:21 lr 0.000200 time 0.2501 (0.2516) loss 0.3760 (0.3630) grad_norm 211297.5469 (253206.5469) mem 14543MB +[2023-10-11 02:55:52 simmim_pretrain](main_simmim.py 218): INFO Train: [50/200][1000/6787] eta 0:24:10 lr 0.000200 time 0.2508 (0.2507) loss 0.3780 (0.3631) grad_norm 234479.2031 (256044.3281) mem 14543MB +[2023-10-11 02:57:57 simmim_pretrain](main_simmim.py 218): INFO Train: [50/200][1500/6787] eta 0:22:03 lr 0.000200 time 0.2462 (0.2503) loss 0.3833 (0.3632) grad_norm 309001.9688 (278049.4375) mem 14543MB +[2023-10-11 03:00:01 simmim_pretrain](main_simmim.py 218): INFO Train: [50/200][2000/6787] eta 0:19:57 lr 0.000200 time 0.2495 (0.2501) loss 0.3651 (0.3630) grad_norm 214604.1250 (inf) mem 14543MB +[2023-10-11 03:02:06 simmim_pretrain](main_simmim.py 218): INFO Train: [50/200][2500/6787] eta 0:17:51 lr 0.000200 time 0.2506 (0.2499) loss 0.3601 (0.3632) grad_norm 202331.9219 (inf) mem 14543MB +[2023-10-11 03:04:11 simmim_pretrain](main_simmim.py 218): INFO Train: [50/200][3000/6787] eta 0:15:46 lr 0.000200 time 0.2470 (0.2499) loss 0.3670 (0.3633) grad_norm 325879.5312 (inf) mem 14543MB +[2023-10-11 03:06:16 simmim_pretrain](main_simmim.py 218): INFO Train: [50/200][3500/6787] eta 0:13:41 lr 0.000200 time 0.2446 (0.2499) loss 0.3825 (0.3633) grad_norm 153282.1875 (inf) mem 14543MB +[2023-10-11 03:08:20 simmim_pretrain](main_simmim.py 218): INFO Train: [50/200][4000/6787] eta 0:11:36 lr 0.000200 time 0.2458 (0.2498) loss 0.3921 (0.3633) grad_norm 334027.1562 (inf) mem 14543MB +[2023-10-11 03:10:25 simmim_pretrain](main_simmim.py 218): INFO Train: [50/200][4500/6787] eta 0:09:31 lr 0.000200 time 0.2496 (0.2497) loss 0.3496 (0.3632) grad_norm 610268.6875 (inf) mem 14543MB +[2023-10-11 03:12:30 simmim_pretrain](main_simmim.py 218): INFO Train: [50/200][5000/6787] eta 0:07:26 lr 0.000200 time 0.2576 (0.2497) loss 0.3593 (0.3631) grad_norm 562246.0625 (inf) mem 14543MB +[2023-10-11 03:14:34 simmim_pretrain](main_simmim.py 218): INFO Train: [50/200][5500/6787] eta 0:05:21 lr 0.000200 time 0.2496 (0.2497) loss 0.3518 (0.3630) 
grad_norm 339417.7812 (inf) mem 14543MB +[2023-10-11 03:16:39 simmim_pretrain](main_simmim.py 218): INFO Train: [50/200][6000/6787] eta 0:03:16 lr 0.000200 time 0.2490 (0.2496) loss 0.3451 (0.3629) grad_norm 502384.1250 (inf) mem 14543MB +[2023-10-11 03:18:44 simmim_pretrain](main_simmim.py 218): INFO Train: [50/200][6500/6787] eta 0:01:11 lr 0.000200 time 0.2500 (0.2496) loss 0.3641 (0.3629) grad_norm 936886.3125 (inf) mem 14543MB +[2023-10-11 03:19:56 simmim_pretrain](main_simmim.py 228): INFO EPOCH 50 training takes 0:28:14 +[2023-10-11 03:19:57 simmim_pretrain](main_simmim.py 218): INFO Train: [51/200][0/6787] eta 2:43:51 lr 0.000200 time 1.4486 (1.4486) loss 0.3680 (0.3680) grad_norm 329809.7812 (329809.7812) mem 14543MB +[2023-10-11 03:22:01 simmim_pretrain](main_simmim.py 218): INFO Train: [51/200][500/6787] eta 0:26:19 lr 0.000200 time 0.2596 (0.2512) loss 0.3668 (0.3619) grad_norm 560819.8125 (451228.8438) mem 14543MB +[2023-10-11 03:24:06 simmim_pretrain](main_simmim.py 218): INFO Train: [51/200][1000/6787] eta 0:24:07 lr 0.000200 time 0.2457 (0.2502) loss 0.3689 (0.3624) grad_norm 610100.0625 (453724.4062) mem 14543MB +[2023-10-11 03:26:11 simmim_pretrain](main_simmim.py 218): INFO Train: [51/200][1500/6787] eta 0:22:01 lr 0.000200 time 0.2473 (0.2500) loss 0.3591 (0.3626) grad_norm 563284.8750 (inf) mem 14543MB +[2023-10-11 03:28:16 simmim_pretrain](main_simmim.py 218): INFO Train: [51/200][2000/6787] eta 0:19:56 lr 0.000200 time 0.2569 (0.2499) loss 0.3576 (0.3625) grad_norm 374223.4375 (inf) mem 14543MB +[2023-10-11 03:30:21 simmim_pretrain](main_simmim.py 218): INFO Train: [51/200][2500/6787] eta 0:17:51 lr 0.000200 time 0.2482 (0.2499) loss 0.3655 (0.3626) grad_norm 427651.9375 (inf) mem 14543MB +[2023-10-11 03:32:25 simmim_pretrain](main_simmim.py 218): INFO Train: [51/200][3000/6787] eta 0:15:46 lr 0.000200 time 0.2514 (0.2499) loss 0.3494 (0.3625) grad_norm 460327.7500 (inf) mem 14543MB +[2023-10-11 03:34:30 simmim_pretrain](main_simmim.py 218): INFO Train: [51/200][3500/6787] eta 0:13:41 lr 0.000200 time 0.2503 (0.2498) loss 0.3583 (0.3623) grad_norm 646937.8750 (inf) mem 14543MB +[2023-10-11 03:36:35 simmim_pretrain](main_simmim.py 218): INFO Train: [51/200][4000/6787] eta 0:11:36 lr 0.000200 time 0.2524 (0.2498) loss 0.3729 (0.3623) grad_norm 815029.9375 (inf) mem 14543MB +[2023-10-11 03:38:40 simmim_pretrain](main_simmim.py 218): INFO Train: [51/200][4500/6787] eta 0:09:31 lr 0.000200 time 0.2481 (0.2498) loss 0.3631 (0.3623) grad_norm 333791.5312 (inf) mem 14543MB +[2023-10-11 03:40:45 simmim_pretrain](main_simmim.py 218): INFO Train: [51/200][5000/6787] eta 0:07:26 lr 0.000200 time 0.2532 (0.2498) loss 0.3633 (0.3624) grad_norm 566627.5625 (inf) mem 14543MB +[2023-10-11 03:42:50 simmim_pretrain](main_simmim.py 218): INFO Train: [51/200][5500/6787] eta 0:05:21 lr 0.000200 time 0.2463 (0.2499) loss 0.3683 (0.3624) grad_norm 473159.7500 (inf) mem 14543MB +[2023-10-11 03:44:55 simmim_pretrain](main_simmim.py 218): INFO Train: [51/200][6000/6787] eta 0:03:16 lr 0.000200 time 0.2470 (0.2499) loss 0.3584 (0.3625) grad_norm 339222.2812 (inf) mem 14543MB +[2023-10-11 03:47:00 simmim_pretrain](main_simmim.py 218): INFO Train: [51/200][6500/6787] eta 0:01:11 lr 0.000200 time 0.2525 (0.2499) loss 0.3605 (0.3625) grad_norm 322059.4375 (inf) mem 14543MB +[2023-10-11 03:48:12 simmim_pretrain](main_simmim.py 228): INFO EPOCH 51 training takes 0:28:16 +[2023-10-11 03:48:14 simmim_pretrain](main_simmim.py 218): INFO Train: [52/200][0/6787] eta 2:54:09 lr 0.000200 time 1.5397 
(1.5397) loss 0.3724 (0.3724) grad_norm 338129.8750 (338129.8750) mem 14543MB +[2023-10-11 03:50:19 simmim_pretrain](main_simmim.py 218): INFO Train: [52/200][500/6787] eta 0:26:22 lr 0.000200 time 0.2464 (0.2517) loss 0.3580 (0.3647) grad_norm 343022.4375 (311183.4688) mem 14543MB +[2023-10-11 03:52:24 simmim_pretrain](main_simmim.py 218): INFO Train: [52/200][1000/6787] eta 0:24:11 lr 0.000200 time 0.2465 (0.2508) loss 0.3479 (0.3645) grad_norm 200447.3281 (298474.0312) mem 14543MB +[2023-10-11 03:54:28 simmim_pretrain](main_simmim.py 218): INFO Train: [52/200][1500/6787] eta 0:22:04 lr 0.000200 time 0.2545 (0.2505) loss 0.3681 (0.3643) grad_norm 349441.0938 (290169.6875) mem 14543MB +[2023-10-11 03:56:33 simmim_pretrain](main_simmim.py 218): INFO Train: [52/200][2000/6787] eta 0:19:58 lr 0.000200 time 0.2500 (0.2503) loss 0.3560 (0.3641) grad_norm 262786.4375 (302799.0625) mem 14543MB +[2023-10-11 03:58:38 simmim_pretrain](main_simmim.py 218): INFO Train: [52/200][2500/6787] eta 0:17:52 lr 0.000200 time 0.2483 (0.2502) loss 0.3666 (0.3636) grad_norm 584927.7500 (336663.4062) mem 14543MB +[2023-10-11 04:00:43 simmim_pretrain](main_simmim.py 218): INFO Train: [52/200][3000/6787] eta 0:15:47 lr 0.000200 time 0.2468 (0.2501) loss 0.3611 (0.3634) grad_norm 346435.5938 (inf) mem 14543MB +[2023-10-11 04:02:48 simmim_pretrain](main_simmim.py 218): INFO Train: [52/200][3500/6787] eta 0:13:41 lr 0.000200 time 0.2502 (0.2500) loss 0.3731 (0.3636) grad_norm 231555.1875 (inf) mem 14543MB +[2023-10-11 04:04:53 simmim_pretrain](main_simmim.py 218): INFO Train: [52/200][4000/6787] eta 0:11:36 lr 0.000200 time 0.2479 (0.2500) loss 0.3646 (0.3636) grad_norm 329689.0625 (inf) mem 14543MB +[2023-10-11 04:06:57 simmim_pretrain](main_simmim.py 218): INFO Train: [52/200][4500/6787] eta 0:09:31 lr 0.000200 time 0.2516 (0.2499) loss 0.3608 (0.3636) grad_norm 219905.7344 (inf) mem 14543MB +[2023-10-11 04:09:02 simmim_pretrain](main_simmim.py 218): INFO Train: [52/200][5000/6787] eta 0:07:26 lr 0.000200 time 0.2467 (0.2498) loss 0.3618 (0.3636) grad_norm 214960.0938 (inf) mem 14543MB +[2023-10-11 04:11:07 simmim_pretrain](main_simmim.py 218): INFO Train: [52/200][5500/6787] eta 0:05:21 lr 0.000200 time 0.2464 (0.2498) loss 0.3635 (0.3635) grad_norm 521343.5938 (inf) mem 14543MB +[2023-10-11 04:13:11 simmim_pretrain](main_simmim.py 218): INFO Train: [52/200][6000/6787] eta 0:03:16 lr 0.000200 time 0.2492 (0.2497) loss 0.3731 (0.3635) grad_norm 264869.9375 (inf) mem 14543MB +[2023-10-11 04:15:16 simmim_pretrain](main_simmim.py 218): INFO Train: [52/200][6500/6787] eta 0:01:11 lr 0.000200 time 0.2502 (0.2497) loss 0.3503 (0.3634) grad_norm 493943.5000 (inf) mem 14543MB +[2023-10-11 04:16:28 simmim_pretrain](main_simmim.py 228): INFO EPOCH 52 training takes 0:28:15 +[2023-10-11 04:16:29 simmim_pretrain](main_simmim.py 218): INFO Train: [53/200][0/6787] eta 2:39:04 lr 0.000200 time 1.4063 (1.4063) loss 0.3741 (0.3741) grad_norm 189475.5312 (189475.5312) mem 14543MB +[2023-10-11 04:18:34 simmim_pretrain](main_simmim.py 218): INFO Train: [53/200][500/6787] eta 0:26:18 lr 0.000200 time 0.2534 (0.2511) loss 0.3836 (0.3623) grad_norm 406536.2812 (inf) mem 14543MB +[2023-10-11 04:20:39 simmim_pretrain](main_simmim.py 218): INFO Train: [53/200][1000/6787] eta 0:24:08 lr 0.000200 time 0.2491 (0.2504) loss 0.3574 (0.3623) grad_norm 598655.6875 (inf) mem 14543MB +[2023-10-11 04:22:43 simmim_pretrain](main_simmim.py 218): INFO Train: [53/200][1500/6787] eta 0:22:01 lr 0.000200 time 0.2523 (0.2500) loss 0.3794 (0.3627) grad_norm 
352266.4375 (inf) mem 14543MB +[2023-10-11 04:24:48 simmim_pretrain](main_simmim.py 218): INFO Train: [53/200][2000/6787] eta 0:19:55 lr 0.000200 time 0.2589 (0.2498) loss 0.3681 (0.3630) grad_norm 229203.3125 (inf) mem 14543MB +[2023-10-11 04:26:52 simmim_pretrain](main_simmim.py 218): INFO Train: [53/200][2500/6787] eta 0:17:50 lr 0.000200 time 0.2457 (0.2497) loss 0.3639 (0.3632) grad_norm 428634.5938 (inf) mem 14543MB +[2023-10-11 04:28:57 simmim_pretrain](main_simmim.py 218): INFO Train: [53/200][3000/6787] eta 0:15:45 lr 0.000200 time 0.2469 (0.2497) loss 0.3580 (0.3634) grad_norm 299912.7188 (inf) mem 14543MB +[2023-10-11 04:31:02 simmim_pretrain](main_simmim.py 218): INFO Train: [53/200][3500/6787] eta 0:13:40 lr 0.000200 time 0.2511 (0.2496) loss 0.3597 (0.3634) grad_norm 258500.3594 (inf) mem 14543MB +[2023-10-11 04:33:07 simmim_pretrain](main_simmim.py 218): INFO Train: [53/200][4000/6787] eta 0:11:35 lr 0.000200 time 0.2462 (0.2496) loss 0.3689 (0.3636) grad_norm 188816.4219 (inf) mem 14543MB +[2023-10-11 04:35:11 simmim_pretrain](main_simmim.py 218): INFO Train: [53/200][4500/6787] eta 0:09:30 lr 0.000200 time 0.2465 (0.2496) loss 0.3430 (0.3637) grad_norm 365742.2812 (inf) mem 14543MB +[2023-10-11 04:37:16 simmim_pretrain](main_simmim.py 218): INFO Train: [53/200][5000/6787] eta 0:07:26 lr 0.000200 time 0.2496 (0.2496) loss 0.3647 (0.3637) grad_norm 323241.8125 (inf) mem 14543MB +[2023-10-11 04:39:21 simmim_pretrain](main_simmim.py 218): INFO Train: [53/200][5500/6787] eta 0:05:21 lr 0.000200 time 0.2494 (0.2496) loss 0.3451 (0.3637) grad_norm 396808.9375 (inf) mem 14543MB +[2023-10-11 04:41:26 simmim_pretrain](main_simmim.py 218): INFO Train: [53/200][6000/6787] eta 0:03:16 lr 0.000200 time 0.2511 (0.2496) loss 0.3568 (0.3636) grad_norm 268041.6250 (inf) mem 14543MB +[2023-10-11 04:43:31 simmim_pretrain](main_simmim.py 218): INFO Train: [53/200][6500/6787] eta 0:01:11 lr 0.000200 time 0.2497 (0.2496) loss 0.3724 (0.3637) grad_norm 307951.5312 (inf) mem 14543MB +[2023-10-11 04:44:43 simmim_pretrain](main_simmim.py 228): INFO EPOCH 53 training takes 0:28:15 +[2023-10-11 04:44:44 simmim_pretrain](main_simmim.py 218): INFO Train: [54/200][0/6787] eta 2:43:16 lr 0.000200 time 1.4434 (1.4434) loss 0.3816 (0.3816) grad_norm 135215.5000 (135215.5000) mem 14543MB +[2023-10-11 04:46:49 simmim_pretrain](main_simmim.py 218): INFO Train: [54/200][500/6787] eta 0:26:26 lr 0.000200 time 0.2496 (0.2524) loss 0.3786 (0.3637) grad_norm 204125.5156 (290804.1875) mem 14543MB +[2023-10-11 04:48:55 simmim_pretrain](main_simmim.py 218): INFO Train: [54/200][1000/6787] eta 0:24:14 lr 0.000200 time 0.2466 (0.2513) loss 0.3555 (0.3640) grad_norm 337903.7188 (292813.8750) mem 14543MB +[2023-10-11 04:51:00 simmim_pretrain](main_simmim.py 218): INFO Train: [54/200][1500/6787] eta 0:22:06 lr 0.000200 time 0.2474 (0.2508) loss 0.3638 (0.3639) grad_norm 538586.5000 (307389.1875) mem 14543MB +[2023-10-11 04:53:05 simmim_pretrain](main_simmim.py 218): INFO Train: [54/200][2000/6787] eta 0:19:59 lr 0.000200 time 0.2501 (0.2506) loss 0.3684 (0.3638) grad_norm 573000.6875 (338029.5000) mem 14543MB +[2023-10-11 04:55:10 simmim_pretrain](main_simmim.py 218): INFO Train: [54/200][2500/6787] eta 0:17:54 lr 0.000200 time 0.2518 (0.2505) loss 0.3692 (0.3635) grad_norm 613638.8750 (367883.0938) mem 14543MB +[2023-10-11 04:57:15 simmim_pretrain](main_simmim.py 218): INFO Train: [54/200][3000/6787] eta 0:15:48 lr 0.000200 time 0.2521 (0.2505) loss 0.3762 (0.3634) grad_norm 534723.0000 (387348.2812) mem 14543MB 
+[2023-10-11 04:59:20 simmim_pretrain](main_simmim.py 218): INFO Train: [54/200][3500/6787] eta 0:13:43 lr 0.000200 time 0.2498 (0.2504) loss 0.3880 (0.3635) grad_norm 347911.3438 (inf) mem 14543MB +[2023-10-11 05:01:25 simmim_pretrain](main_simmim.py 218): INFO Train: [54/200][4000/6787] eta 0:11:37 lr 0.000200 time 0.2631 (0.2503) loss 0.3488 (0.3635) grad_norm 324752.1250 (inf) mem 14543MB +[2023-10-11 05:03:30 simmim_pretrain](main_simmim.py 218): INFO Train: [54/200][4500/6787] eta 0:09:32 lr 0.000200 time 0.2465 (0.2503) loss 0.3550 (0.3636) grad_norm 306758.8438 (inf) mem 14543MB +[2023-10-11 05:05:34 simmim_pretrain](main_simmim.py 218): INFO Train: [54/200][5000/6787] eta 0:07:27 lr 0.000200 time 0.2519 (0.2502) loss 0.3842 (0.3638) grad_norm 284463.9375 (inf) mem 14543MB +[2023-10-11 05:07:39 simmim_pretrain](main_simmim.py 218): INFO Train: [54/200][5500/6787] eta 0:05:21 lr 0.000200 time 0.2536 (0.2502) loss 0.3631 (0.3638) grad_norm 309042.9062 (inf) mem 14543MB +[2023-10-11 05:09:44 simmim_pretrain](main_simmim.py 218): INFO Train: [54/200][6000/6787] eta 0:03:16 lr 0.000200 time 0.2523 (0.2502) loss 0.3498 (0.3637) grad_norm 571494.4375 (inf) mem 14543MB +[2023-10-11 05:11:49 simmim_pretrain](main_simmim.py 218): INFO Train: [54/200][6500/6787] eta 0:01:11 lr 0.000200 time 0.2506 (0.2501) loss 0.3789 (0.3636) grad_norm 316518.9375 (inf) mem 14543MB +[2023-10-11 05:13:01 simmim_pretrain](main_simmim.py 228): INFO EPOCH 54 training takes 0:28:18 +[2023-10-11 05:13:03 simmim_pretrain](main_simmim.py 218): INFO Train: [55/200][0/6787] eta 2:39:58 lr 0.000200 time 1.4142 (1.4142) loss 0.3666 (0.3666) grad_norm 373138.6250 (373138.6250) mem 14543MB +[2023-10-11 05:15:07 simmim_pretrain](main_simmim.py 218): INFO Train: [55/200][500/6787] eta 0:26:23 lr 0.000200 time 0.2470 (0.2518) loss 0.3757 (0.3639) grad_norm 201458.0781 (304766.2812) mem 14543MB +[2023-10-11 05:17:12 simmim_pretrain](main_simmim.py 218): INFO Train: [55/200][1000/6787] eta 0:24:10 lr 0.000200 time 0.2594 (0.2506) loss 0.3644 (0.3637) grad_norm 237650.2344 (303395.6250) mem 14543MB +[2023-10-11 05:19:17 simmim_pretrain](main_simmim.py 218): INFO Train: [55/200][1500/6787] eta 0:22:02 lr 0.000200 time 0.2459 (0.2502) loss 0.3680 (0.3641) grad_norm 388402.5938 (304719.1562) mem 14543MB +[2023-10-11 05:21:21 simmim_pretrain](main_simmim.py 218): INFO Train: [55/200][2000/6787] eta 0:19:56 lr 0.000200 time 0.2467 (0.2500) loss 0.3593 (0.3640) grad_norm 432491.8125 (337349.0312) mem 14543MB +[2023-10-11 05:23:26 simmim_pretrain](main_simmim.py 218): INFO Train: [55/200][2500/6787] eta 0:17:51 lr 0.000200 time 0.2547 (0.2498) loss 0.3549 (0.3634) grad_norm 413327.5000 (inf) mem 14543MB +[2023-10-11 05:25:31 simmim_pretrain](main_simmim.py 218): INFO Train: [55/200][3000/6787] eta 0:15:45 lr 0.000200 time 0.2427 (0.2498) loss 0.3729 (0.3636) grad_norm 169436.2656 (inf) mem 14543MB +[2023-10-11 05:27:36 simmim_pretrain](main_simmim.py 218): INFO Train: [55/200][3500/6787] eta 0:13:40 lr 0.000200 time 0.2521 (0.2498) loss 0.3611 (0.3637) grad_norm 251431.1406 (inf) mem 14543MB +[2023-10-11 05:29:40 simmim_pretrain](main_simmim.py 218): INFO Train: [55/200][4000/6787] eta 0:11:35 lr 0.000200 time 0.2472 (0.2497) loss 0.3703 (0.3637) grad_norm 263475.0625 (inf) mem 14543MB +[2023-10-11 05:31:45 simmim_pretrain](main_simmim.py 218): INFO Train: [55/200][4500/6787] eta 0:09:31 lr 0.000200 time 0.2472 (0.2497) loss 0.3467 (0.3640) grad_norm 515986.5938 (inf) mem 14543MB +[2023-10-11 05:33:50 simmim_pretrain](main_simmim.py 
218): INFO Train: [55/200][5000/6787] eta 0:07:26 lr 0.000200 time 0.2469 (0.2497) loss 0.3753 (0.3640) grad_norm 357400.7500 (inf) mem 14543MB +[2023-10-11 05:35:54 simmim_pretrain](main_simmim.py 218): INFO Train: [55/200][5500/6787] eta 0:05:21 lr 0.000200 time 0.2465 (0.2496) loss 0.3626 (0.3640) grad_norm 316332.9688 (inf) mem 14543MB +[2023-10-11 05:37:59 simmim_pretrain](main_simmim.py 218): INFO Train: [55/200][6000/6787] eta 0:03:16 lr 0.000200 time 0.2474 (0.2496) loss 0.3652 (0.3641) grad_norm 306102.3125 (inf) mem 14543MB +[2023-10-11 05:40:04 simmim_pretrain](main_simmim.py 218): INFO Train: [55/200][6500/6787] eta 0:01:11 lr 0.000200 time 0.2457 (0.2496) loss 0.3545 (0.3640) grad_norm 312450.6875 (inf) mem 14543MB +[2023-10-11 05:41:16 simmim_pretrain](main_simmim.py 228): INFO EPOCH 55 training takes 0:28:14 +[2023-10-11 05:41:18 simmim_pretrain](main_simmim.py 218): INFO Train: [56/200][0/6787] eta 2:54:25 lr 0.000200 time 1.5420 (1.5420) loss 0.3633 (0.3633) grad_norm 213465.1719 (213465.1719) mem 14543MB +[2023-10-11 05:43:22 simmim_pretrain](main_simmim.py 218): INFO Train: [56/200][500/6787] eta 0:26:25 lr 0.000200 time 0.2522 (0.2522) loss 0.3426 (0.3637) grad_norm 549147.5000 (410852.0938) mem 14543MB +[2023-10-11 05:45:27 simmim_pretrain](main_simmim.py 218): INFO Train: [56/200][1000/6787] eta 0:24:11 lr 0.000200 time 0.2564 (0.2509) loss 0.3264 (0.3629) grad_norm 712221.4375 (437120.6562) mem 14543MB +[2023-10-11 05:47:32 simmim_pretrain](main_simmim.py 218): INFO Train: [56/200][1500/6787] eta 0:22:04 lr 0.000200 time 0.2465 (0.2506) loss 0.3655 (0.3626) grad_norm 872100.8750 (461076.6562) mem 14543MB +[2023-10-11 05:49:37 simmim_pretrain](main_simmim.py 218): INFO Train: [56/200][2000/6787] eta 0:19:58 lr 0.000200 time 0.2496 (0.2504) loss 0.5545 (0.3697) grad_norm 2022.5883 (inf) mem 14543MB +[2023-10-11 05:51:42 simmim_pretrain](main_simmim.py 218): INFO Train: [56/200][2500/6787] eta 0:17:52 lr 0.000200 time 0.2460 (0.2502) loss 0.4950 (0.3989) grad_norm 5994.0884 (inf) mem 14543MB +[2023-10-11 05:53:46 simmim_pretrain](main_simmim.py 218): INFO Train: [56/200][3000/6787] eta 0:15:46 lr 0.000200 time 0.2493 (0.2500) loss 0.4947 (0.4146) grad_norm 5440.5225 (inf) mem 14543MB +[2023-10-11 05:55:51 simmim_pretrain](main_simmim.py 218): INFO Train: [56/200][3500/6787] eta 0:13:41 lr 0.000200 time 0.2496 (0.2499) loss 0.4753 (0.4229) grad_norm 9829.1738 (inf) mem 14543MB +[2023-10-11 05:57:56 simmim_pretrain](main_simmim.py 218): INFO Train: [56/200][4000/6787] eta 0:11:36 lr 0.000200 time 0.2491 (0.2498) loss 0.4198 (0.4257) grad_norm 14463.5820 (inf) mem 14543MB +[2023-10-11 06:00:01 simmim_pretrain](main_simmim.py 218): INFO Train: [56/200][4500/6787] eta 0:09:31 lr 0.000200 time 0.2492 (0.2498) loss 0.3962 (0.4241) grad_norm 20710.0352 (inf) mem 14543MB +[2023-10-11 06:02:05 simmim_pretrain](main_simmim.py 218): INFO Train: [56/200][5000/6787] eta 0:07:26 lr 0.000200 time 0.2490 (0.2498) loss 0.3704 (0.4213) grad_norm 10343.1875 (inf) mem 14543MB +[2023-10-11 06:04:10 simmim_pretrain](main_simmim.py 218): INFO Train: [56/200][5500/6787] eta 0:05:21 lr 0.000200 time 0.2523 (0.2498) loss 0.3971 (0.4183) grad_norm 8229.6523 (inf) mem 14543MB +[2023-10-11 06:06:15 simmim_pretrain](main_simmim.py 218): INFO Train: [56/200][6000/6787] eta 0:03:16 lr 0.000200 time 0.2467 (0.2498) loss 0.3810 (0.4151) grad_norm 9307.1475 (inf) mem 14543MB +[2023-10-11 06:08:20 simmim_pretrain](main_simmim.py 218): INFO Train: [56/200][6500/6787] eta 0:01:11 lr 0.000200 time 0.2593 
(0.2498) loss 0.3769 (0.4120) grad_norm 17468.2266 (inf) mem 14543MB +[2023-10-11 06:09:32 simmim_pretrain](main_simmim.py 228): INFO EPOCH 56 training takes 0:28:15 +[2023-10-11 06:09:33 simmim_pretrain](main_simmim.py 218): INFO Train: [57/200][0/6787] eta 2:46:16 lr 0.000200 time 1.4699 (1.4699) loss 0.3819 (0.3819) grad_norm 21356.7773 (21356.7773) mem 14543MB +[2023-10-11 06:11:38 simmim_pretrain](main_simmim.py 218): INFO Train: [57/200][500/6787] eta 0:26:20 lr 0.000200 time 0.2459 (0.2515) loss 0.3795 (0.3728) grad_norm 28660.2031 (24196.7012) mem 14543MB +[2023-10-11 06:13:42 simmim_pretrain](main_simmim.py 218): INFO Train: [57/200][1000/6787] eta 0:24:08 lr 0.000200 time 0.2484 (0.2503) loss 0.3651 (0.3726) grad_norm 20412.0098 (24111.9883) mem 14543MB +[2023-10-11 06:15:47 simmim_pretrain](main_simmim.py 218): INFO Train: [57/200][1500/6787] eta 0:22:01 lr 0.000200 time 0.2529 (0.2499) loss 0.3744 (0.3717) grad_norm 18188.6523 (25952.2031) mem 14543MB +[2023-10-11 06:17:52 simmim_pretrain](main_simmim.py 218): INFO Train: [57/200][2000/6787] eta 0:19:55 lr 0.000200 time 0.2449 (0.2497) loss 0.3675 (0.3710) grad_norm 52481.3477 (28705.1250) mem 14543MB +[2023-10-11 06:19:56 simmim_pretrain](main_simmim.py 218): INFO Train: [57/200][2500/6787] eta 0:17:50 lr 0.000200 time 0.2547 (0.2496) loss 0.3701 (0.3705) grad_norm 44398.6250 (31606.7324) mem 14543MB +[2023-10-11 06:22:01 simmim_pretrain](main_simmim.py 218): INFO Train: [57/200][3000/6787] eta 0:15:45 lr 0.000200 time 0.2451 (0.2495) loss 0.3719 (0.3702) grad_norm 44861.0195 (33857.7969) mem 14543MB +[2023-10-11 06:24:05 simmim_pretrain](main_simmim.py 218): INFO Train: [57/200][3500/6787] eta 0:13:40 lr 0.000200 time 0.2491 (0.2495) loss 0.3695 (0.3697) grad_norm 37820.1406 (36847.7344) mem 14543MB +[2023-10-11 06:26:10 simmim_pretrain](main_simmim.py 218): INFO Train: [57/200][4000/6787] eta 0:11:35 lr 0.000200 time 0.2523 (0.2495) loss 0.3829 (0.3693) grad_norm 89612.6172 (40809.8164) mem 14543MB +[2023-10-11 06:28:15 simmim_pretrain](main_simmim.py 218): INFO Train: [57/200][4500/6787] eta 0:09:30 lr 0.000200 time 0.2495 (0.2495) loss 0.3915 (0.3689) grad_norm 95887.1953 (44727.4023) mem 14543MB +[2023-10-11 06:30:20 simmim_pretrain](main_simmim.py 218): INFO Train: [57/200][5000/6787] eta 0:07:25 lr 0.000200 time 0.2463 (0.2495) loss 0.3682 (0.3686) grad_norm 36438.8633 (48597.1172) mem 14543MB +[2023-10-11 06:32:24 simmim_pretrain](main_simmim.py 218): INFO Train: [57/200][5500/6787] eta 0:05:21 lr 0.000200 time 0.2454 (0.2495) loss 0.3444 (0.3683) grad_norm 161019.0781 (54351.9453) mem 14543MB +[2023-10-11 06:34:29 simmim_pretrain](main_simmim.py 218): INFO Train: [57/200][6000/6787] eta 0:03:16 lr 0.000200 time 0.2469 (0.2495) loss 0.3418 (0.3681) grad_norm 176578.9219 (63988.3828) mem 14543MB +[2023-10-11 06:36:34 simmim_pretrain](main_simmim.py 218): INFO Train: [57/200][6500/6787] eta 0:01:11 lr 0.000200 time 0.2450 (0.2495) loss 0.3682 (0.3678) grad_norm 141154.0781 (71518.5469) mem 14543MB +[2023-10-11 06:37:46 simmim_pretrain](main_simmim.py 228): INFO EPOCH 57 training takes 0:28:14 +[2023-10-11 06:37:47 simmim_pretrain](main_simmim.py 218): INFO Train: [58/200][0/6787] eta 2:47:45 lr 0.000200 time 1.4830 (1.4830) loss 0.3654 (0.3654) grad_norm 121403.5547 (121403.5547) mem 14543MB +[2023-10-11 06:39:52 simmim_pretrain](main_simmim.py 218): INFO Train: [58/200][500/6787] eta 0:26:21 lr 0.000200 time 0.2486 (0.2515) loss 0.3538 (0.3654) grad_norm 267333.2500 (169665.2656) mem 14543MB +[2023-10-11 06:41:57 
simmim_pretrain](main_simmim.py 218): INFO Train: [58/200][1000/6787] eta 0:24:10 lr 0.000200 time 0.2522 (0.2506) loss 0.3564 (0.3646) grad_norm 210185.5156 (195570.2812) mem 14543MB +[2023-10-11 06:44:02 simmim_pretrain](main_simmim.py 218): INFO Train: [58/200][1500/6787] eta 0:22:03 lr 0.000200 time 0.2527 (0.2504) loss 0.3647 (0.3641) grad_norm 507732.5625 (231626.2344) mem 14543MB +[2023-10-11 06:46:07 simmim_pretrain](main_simmim.py 218): INFO Train: [58/200][2000/6787] eta 0:19:58 lr 0.000200 time 0.2518 (0.2503) loss 0.3607 (0.3639) grad_norm 597508.8125 (262564.0312) mem 14543MB +[2023-10-11 06:48:12 simmim_pretrain](main_simmim.py 218): INFO Train: [58/200][2500/6787] eta 0:17:53 lr 0.000200 time 0.2594 (0.2503) loss 0.3597 (0.3638) grad_norm 246748.2656 (inf) mem 14543MB +[2023-10-11 06:50:17 simmim_pretrain](main_simmim.py 218): INFO Train: [58/200][3000/6787] eta 0:15:47 lr 0.000200 time 0.2539 (0.2502) loss 0.3564 (0.3639) grad_norm 331113.1875 (inf) mem 14543MB +[2023-10-11 06:52:22 simmim_pretrain](main_simmim.py 218): INFO Train: [58/200][3500/6787] eta 0:13:42 lr 0.000200 time 0.2462 (0.2501) loss 0.3629 (0.3637) grad_norm 260113.2031 (inf) mem 14543MB +[2023-10-11 06:54:27 simmim_pretrain](main_simmim.py 218): INFO Train: [58/200][4000/6787] eta 0:11:36 lr 0.000200 time 0.2487 (0.2501) loss 0.3716 (0.3637) grad_norm 329937.1875 (inf) mem 14543MB +[2023-10-11 06:56:32 simmim_pretrain](main_simmim.py 218): INFO Train: [58/200][4500/6787] eta 0:09:31 lr 0.000200 time 0.2471 (0.2501) loss 0.3602 (0.3637) grad_norm 300800.4062 (inf) mem 14543MB +[2023-10-11 06:58:36 simmim_pretrain](main_simmim.py 218): INFO Train: [58/200][5000/6787] eta 0:07:26 lr 0.000200 time 0.2513 (0.2500) loss 0.3538 (0.3636) grad_norm 385134.5938 (inf) mem 14543MB +[2023-10-11 07:00:41 simmim_pretrain](main_simmim.py 218): INFO Train: [58/200][5500/6787] eta 0:05:21 lr 0.000200 time 0.2467 (0.2500) loss 0.3767 (0.3637) grad_norm 488168.4062 (inf) mem 14543MB +[2023-10-11 07:02:46 simmim_pretrain](main_simmim.py 218): INFO Train: [58/200][6000/6787] eta 0:03:16 lr 0.000200 time 0.2507 (0.2499) loss 0.3658 (0.3637) grad_norm 295277.1562 (inf) mem 14543MB +[2023-10-11 07:04:51 simmim_pretrain](main_simmim.py 218): INFO Train: [58/200][6500/6787] eta 0:01:11 lr 0.000200 time 0.2490 (0.2499) loss 0.3619 (0.3637) grad_norm 318438.1875 (inf) mem 14543MB +[2023-10-11 07:06:03 simmim_pretrain](main_simmim.py 228): INFO EPOCH 58 training takes 0:28:16 +[2023-10-11 07:06:04 simmim_pretrain](main_simmim.py 218): INFO Train: [59/200][0/6787] eta 2:48:09 lr 0.000200 time 1.4866 (1.4866) loss 0.3861 (0.3861) grad_norm 171094.7188 (171094.7188) mem 14543MB +[2023-10-11 07:08:09 simmim_pretrain](main_simmim.py 218): INFO Train: [59/200][500/6787] eta 0:26:22 lr 0.000200 time 0.2556 (0.2517) loss 0.3893 (0.3650) grad_norm 263277.6875 (234104.7812) mem 14543MB +[2023-10-11 07:10:14 simmim_pretrain](main_simmim.py 218): INFO Train: [59/200][1000/6787] eta 0:24:10 lr 0.000200 time 0.2506 (0.2507) loss 0.3592 (0.3648) grad_norm 168489.4688 (224050.7500) mem 14543MB +[2023-10-11 07:12:18 simmim_pretrain](main_simmim.py 218): INFO Train: [59/200][1500/6787] eta 0:22:03 lr 0.000200 time 0.2592 (0.2502) loss 0.3361 (0.3645) grad_norm 201829.3594 (227714.0938) mem 14543MB +[2023-10-11 07:14:23 simmim_pretrain](main_simmim.py 218): INFO Train: [59/200][2000/6787] eta 0:19:57 lr 0.000200 time 0.2533 (0.2501) loss 0.3629 (0.3643) grad_norm 548683.4375 (247417.6250) mem 14543MB +[2023-10-11 07:16:30 simmim_pretrain](main_simmim.py 
218): INFO Train: [59/200][2500/6787] eta 0:17:55 lr 0.000200 time 0.2540 (0.2509) loss 0.3416 (0.3641) grad_norm 536888.0000 (278132.8750) mem 14543MB
[per-iteration Train log entries for epochs 59-84 elided: lr 0.000200 throughout, loss averaging ~0.36 at ~0.25 s/iter with mem 14543MB; loss spikes early in epochs 70-71 and 80 with per-step grad_norm dropping by roughly an order of magnitude; the running grad_norm average repeatedly overflows to inf and reads nan throughout epoch 77]
+[2023-10-11 07:34:39 simmim_pretrain](main_simmim.py 228): INFO EPOCH 59 training takes 0:28:36
+[2023-10-11 08:02:55 simmim_pretrain](main_simmim.py 228): INFO EPOCH 60 training takes 0:28:16
+[2023-10-11 08:02:55 simmim_pretrain](utils.py 62): INFO /root/autodl-tmp/LSQ-simmim/checkpoint/simmim_pretrain/vit_simmim/ckpt_epoch_60.pth saving......
+[2023-10-11 08:02:56 simmim_pretrain](utils.py 64): INFO /root/autodl-tmp/LSQ-simmim/checkpoint/simmim_pretrain/vit_simmim/ckpt_epoch_60.pth saved !!!
+[2023-10-11 08:31:12 simmim_pretrain](main_simmim.py 228): INFO EPOCH 61 training takes 0:28:15
+[2023-10-11 08:59:37 simmim_pretrain](main_simmim.py 228): INFO EPOCH 62 training takes 0:28:24
+[2023-10-11 09:27:51 simmim_pretrain](main_simmim.py 228): INFO EPOCH 63 training takes 0:28:14
+[2023-10-11 09:56:12 simmim_pretrain](main_simmim.py 228): INFO EPOCH 64 training takes 0:28:20
+[2023-10-11 10:24:27 simmim_pretrain](main_simmim.py 228): INFO EPOCH 65 training takes 0:28:15
+[2023-10-11 10:52:39 simmim_pretrain](main_simmim.py 228): INFO EPOCH 66 training takes 0:28:11
+[2023-10-11 11:20:52 simmim_pretrain](main_simmim.py 228): INFO EPOCH 67 training takes 0:28:13
+[2023-10-11 11:49:09 simmim_pretrain](main_simmim.py 228): INFO EPOCH 68 training takes 0:28:16
+[2023-10-11 12:17:27 simmim_pretrain](main_simmim.py 228): INFO EPOCH 69 training takes 0:28:18
+[2023-10-11 12:45:45 simmim_pretrain](main_simmim.py 228): INFO EPOCH 70 training takes 0:28:17
+[2023-10-11 13:14:00 simmim_pretrain](main_simmim.py 228): INFO EPOCH 71 training takes 0:28:15
+[2023-10-11 13:42:19 simmim_pretrain](main_simmim.py 228): INFO EPOCH 72 training takes 0:28:18
+[2023-10-11 14:10:35 simmim_pretrain](main_simmim.py 228): INFO EPOCH 73 training takes 0:28:16
+[2023-10-11 14:38:53 simmim_pretrain](main_simmim.py 228): INFO EPOCH 74 training takes 0:28:18
+[2023-10-11 15:07:12 simmim_pretrain](main_simmim.py 228): INFO EPOCH 75 training takes 0:28:18
+[2023-10-11 15:35:29 simmim_pretrain](main_simmim.py 228): INFO EPOCH 76 training takes 0:28:17
+[2023-10-11 16:03:45 simmim_pretrain](main_simmim.py 228): INFO EPOCH 77 training takes 0:28:16
+[2023-10-11 16:32:45 simmim_pretrain](main_simmim.py 228): INFO EPOCH 78 training takes 0:28:59
+[2023-10-11 17:01:24 simmim_pretrain](main_simmim.py 228): INFO EPOCH 79 training takes 0:28:38
+[2023-10-11 17:30:22 simmim_pretrain](main_simmim.py 228): INFO EPOCH 80 training takes 0:28:58
+[2023-10-11 17:30:22 simmim_pretrain](utils.py 62): INFO /root/autodl-tmp/LSQ-simmim/checkpoint/simmim_pretrain/vit_simmim/ckpt_epoch_80.pth saving......
+[2023-10-11 17:30:23 simmim_pretrain](utils.py 64): INFO /root/autodl-tmp/LSQ-simmim/checkpoint/simmim_pretrain/vit_simmim/ckpt_epoch_80.pth saved !!!
+[2023-10-11 17:59:29 simmim_pretrain](main_simmim.py 228): INFO EPOCH 81 training takes 0:29:06
+[2023-10-11 18:28:55 simmim_pretrain](main_simmim.py 228): INFO EPOCH 82 training takes 0:29:26
+[2023-10-11 18:57:48 simmim_pretrain](main_simmim.py 228): INFO EPOCH 83 training takes 0:28:53
+[2023-10-11 19:16:40 simmim_pretrain](main_simmim.py 218): INFO Train: [84/200][4500/6787] eta 0:09:34 lr 0.000200 time 0.2461 (0.2514) loss 0.3781 (0.3605)
grad_norm 368063.0000 (inf) mem 14543MB +[2023-10-11 19:18:45 simmim_pretrain](main_simmim.py 218): INFO Train: [84/200][5000/6787] eta 0:07:28 lr 0.000200 time 0.2504 (0.2512) loss 0.3653 (0.3605) grad_norm 210457.3281 (inf) mem 14543MB +[2023-10-11 19:20:50 simmim_pretrain](main_simmim.py 218): INFO Train: [84/200][5500/6787] eta 0:05:23 lr 0.000200 time 0.2463 (0.2511) loss 0.3665 (0.3604) grad_norm 213639.8438 (inf) mem 14543MB +[2023-10-11 19:22:55 simmim_pretrain](main_simmim.py 218): INFO Train: [84/200][6000/6787] eta 0:03:17 lr 0.000200 time 0.2473 (0.2510) loss 0.3867 (0.3603) grad_norm 284046.3125 (inf) mem 14543MB +[2023-10-11 19:25:00 simmim_pretrain](main_simmim.py 218): INFO Train: [84/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2533 (0.2509) loss 0.3559 (0.3603) grad_norm 346463.3750 (inf) mem 14543MB +[2023-10-11 19:26:12 simmim_pretrain](main_simmim.py 228): INFO EPOCH 84 training takes 0:28:23 +[2023-10-11 19:26:13 simmim_pretrain](main_simmim.py 218): INFO Train: [85/200][0/6787] eta 2:52:16 lr 0.000200 time 1.5229 (1.5229) loss 0.3701 (0.3701) grad_norm 231430.0469 (231430.0469) mem 14543MB +[2023-10-11 19:28:18 simmim_pretrain](main_simmim.py 218): INFO Train: [85/200][500/6787] eta 0:26:27 lr 0.000200 time 0.2488 (0.2526) loss 0.3522 (0.3620) grad_norm 272557.3125 (250176.9062) mem 14543MB +[2023-10-11 19:30:23 simmim_pretrain](main_simmim.py 218): INFO Train: [85/200][1000/6787] eta 0:24:13 lr 0.000200 time 0.2470 (0.2512) loss 0.3777 (0.3619) grad_norm 284231.1250 (244329.8750) mem 14543MB +[2023-10-11 19:32:28 simmim_pretrain](main_simmim.py 218): INFO Train: [85/200][1500/6787] eta 0:22:06 lr 0.000200 time 0.2463 (0.2508) loss 0.3492 (0.3613) grad_norm 226388.4062 (249174.1719) mem 14543MB +[2023-10-11 19:34:34 simmim_pretrain](main_simmim.py 218): INFO Train: [85/200][2000/6787] eta 0:20:00 lr 0.000200 time 0.2501 (0.2507) loss 0.3615 (0.3610) grad_norm 441312.4688 (267987.4688) mem 14543MB +[2023-10-11 19:36:39 simmim_pretrain](main_simmim.py 218): INFO Train: [85/200][2500/6787] eta 0:17:54 lr 0.000200 time 0.2510 (0.2507) loss 0.3541 (0.3609) grad_norm 245185.4375 (inf) mem 14543MB +[2023-10-11 19:38:44 simmim_pretrain](main_simmim.py 218): INFO Train: [85/200][3000/6787] eta 0:15:49 lr 0.000200 time 0.2533 (0.2508) loss 0.3656 (0.3612) grad_norm 243705.8594 (inf) mem 14543MB +[2023-10-11 19:40:50 simmim_pretrain](main_simmim.py 218): INFO Train: [85/200][3500/6787] eta 0:13:44 lr 0.000200 time 0.2491 (0.2508) loss 0.3729 (0.3610) grad_norm 229829.5469 (inf) mem 14543MB +[2023-10-11 19:42:56 simmim_pretrain](main_simmim.py 218): INFO Train: [85/200][4000/6787] eta 0:11:39 lr 0.000200 time 0.2560 (0.2509) loss 0.3640 (0.3609) grad_norm 227095.2344 (inf) mem 14543MB +[2023-10-11 19:45:01 simmim_pretrain](main_simmim.py 218): INFO Train: [85/200][4500/6787] eta 0:09:33 lr 0.000200 time 0.2498 (0.2509) loss 0.3661 (0.3608) grad_norm 178129.6250 (inf) mem 14543MB +[2023-10-11 19:47:07 simmim_pretrain](main_simmim.py 218): INFO Train: [85/200][5000/6787] eta 0:07:28 lr 0.000200 time 0.2462 (0.2509) loss 0.3586 (0.3606) grad_norm 253679.6719 (inf) mem 14543MB +[2023-10-11 19:49:12 simmim_pretrain](main_simmim.py 218): INFO Train: [85/200][5500/6787] eta 0:05:22 lr 0.000200 time 0.2463 (0.2509) loss 0.3660 (0.3606) grad_norm 396014.1562 (inf) mem 14543MB +[2023-10-11 19:51:18 simmim_pretrain](main_simmim.py 218): INFO Train: [85/200][6000/6787] eta 0:03:17 lr 0.000200 time 0.2516 (0.2509) loss 0.3699 (0.3605) grad_norm 486671.0938 (inf) mem 14543MB +[2023-10-11 
19:53:23 simmim_pretrain](main_simmim.py 218): INFO Train: [85/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2506 (0.2509) loss 0.3733 (0.3604) grad_norm 221600.2031 (inf) mem 14543MB +[2023-10-11 19:54:36 simmim_pretrain](main_simmim.py 228): INFO EPOCH 85 training takes 0:28:23 +[2023-10-11 19:54:37 simmim_pretrain](main_simmim.py 218): INFO Train: [86/200][0/6787] eta 2:47:26 lr 0.000200 time 1.4803 (1.4803) loss 0.3559 (0.3559) grad_norm 373023.6562 (373023.6562) mem 14543MB +[2023-10-11 19:56:42 simmim_pretrain](main_simmim.py 218): INFO Train: [86/200][500/6787] eta 0:26:28 lr 0.000200 time 0.2570 (0.2527) loss 0.3705 (0.3608) grad_norm 274661.5312 (263276.8125) mem 14543MB +[2023-10-11 19:58:47 simmim_pretrain](main_simmim.py 218): INFO Train: [86/200][1000/6787] eta 0:24:14 lr 0.000200 time 0.2516 (0.2513) loss 0.3515 (0.3604) grad_norm 292293.8750 (261800.0312) mem 14543MB +[2023-10-11 20:00:52 simmim_pretrain](main_simmim.py 218): INFO Train: [86/200][1500/6787] eta 0:22:05 lr 0.000200 time 0.2536 (0.2508) loss 0.3623 (0.3608) grad_norm 316428.7500 (257527.7188) mem 14543MB +[2023-10-11 20:02:57 simmim_pretrain](main_simmim.py 218): INFO Train: [86/200][2000/6787] eta 0:19:59 lr 0.000200 time 0.2489 (0.2505) loss 0.3681 (0.3609) grad_norm 447697.5938 (269637.5625) mem 14543MB +[2023-10-11 20:05:02 simmim_pretrain](main_simmim.py 218): INFO Train: [86/200][2500/6787] eta 0:17:53 lr 0.000200 time 0.2509 (0.2504) loss 0.3650 (0.3609) grad_norm 243482.3594 (inf) mem 14543MB +[2023-10-11 20:07:07 simmim_pretrain](main_simmim.py 218): INFO Train: [86/200][3000/6787] eta 0:15:48 lr 0.000200 time 0.2478 (0.2503) loss 0.3643 (0.3610) grad_norm 221536.2500 (inf) mem 14543MB +[2023-10-11 20:09:12 simmim_pretrain](main_simmim.py 218): INFO Train: [86/200][3500/6787] eta 0:13:42 lr 0.000200 time 0.2533 (0.2503) loss 0.3673 (0.3610) grad_norm 199322.9219 (inf) mem 14543MB +[2023-10-11 20:11:17 simmim_pretrain](main_simmim.py 218): INFO Train: [86/200][4000/6787] eta 0:11:37 lr 0.000200 time 0.2463 (0.2503) loss 0.3743 (0.3612) grad_norm 195014.1094 (inf) mem 14543MB +[2023-10-11 20:13:22 simmim_pretrain](main_simmim.py 218): INFO Train: [86/200][4500/6787] eta 0:09:32 lr 0.000200 time 0.2511 (0.2502) loss 0.3910 (0.3610) grad_norm 244593.2031 (inf) mem 14543MB +[2023-10-11 20:15:27 simmim_pretrain](main_simmim.py 218): INFO Train: [86/200][5000/6787] eta 0:07:27 lr 0.000200 time 0.2461 (0.2502) loss 0.3568 (0.3609) grad_norm 317116.0000 (inf) mem 14543MB +[2023-10-11 20:17:32 simmim_pretrain](main_simmim.py 218): INFO Train: [86/200][5500/6787] eta 0:05:22 lr 0.000200 time 0.2464 (0.2503) loss 0.3654 (0.3609) grad_norm 265173.1250 (inf) mem 14543MB +[2023-10-11 20:19:38 simmim_pretrain](main_simmim.py 218): INFO Train: [86/200][6000/6787] eta 0:03:16 lr 0.000200 time 0.2503 (0.2503) loss 0.3632 (0.3610) grad_norm 371299.0625 (inf) mem 14543MB +[2023-10-11 20:21:43 simmim_pretrain](main_simmim.py 218): INFO Train: [86/200][6500/6787] eta 0:01:11 lr 0.000200 time 0.2511 (0.2503) loss 0.3729 (0.3609) grad_norm 340506.0625 (inf) mem 14543MB +[2023-10-11 20:22:56 simmim_pretrain](main_simmim.py 228): INFO EPOCH 86 training takes 0:28:20 +[2023-10-11 20:22:57 simmim_pretrain](main_simmim.py 218): INFO Train: [87/200][0/6787] eta 2:54:11 lr 0.000200 time 1.5400 (1.5400) loss 0.3515 (0.3515) grad_norm 261586.7969 (261586.7969) mem 14543MB +[2023-10-11 20:25:02 simmim_pretrain](main_simmim.py 218): INFO Train: [87/200][500/6787] eta 0:26:29 lr 0.000200 time 0.2472 (0.2528) loss 0.3369 (0.3603) 
grad_norm 282509.0312 (283270.1562) mem 14543MB +[2023-10-11 20:27:08 simmim_pretrain](main_simmim.py 218): INFO Train: [87/200][1000/6787] eta 0:24:17 lr 0.000200 time 0.2496 (0.2519) loss 0.3476 (0.3604) grad_norm 320019.9688 (325104.2500) mem 14543MB +[2023-10-11 20:29:13 simmim_pretrain](main_simmim.py 218): INFO Train: [87/200][1500/6787] eta 0:22:09 lr 0.000200 time 0.2510 (0.2515) loss 0.3502 (0.3604) grad_norm 477013.1875 (350222.0625) mem 14543MB +[2023-10-11 20:31:18 simmim_pretrain](main_simmim.py 218): INFO Train: [87/200][2000/6787] eta 0:20:03 lr 0.000200 time 0.2462 (0.2513) loss 0.3574 (0.3602) grad_norm 289423.2188 (375225.8438) mem 14543MB +[2023-10-11 20:33:24 simmim_pretrain](main_simmim.py 218): INFO Train: [87/200][2500/6787] eta 0:17:57 lr 0.000200 time 0.2538 (0.2513) loss 0.3684 (0.3604) grad_norm 387449.8125 (inf) mem 14543MB +[2023-10-11 20:35:29 simmim_pretrain](main_simmim.py 218): INFO Train: [87/200][3000/6787] eta 0:15:51 lr 0.000200 time 0.2521 (0.2512) loss 0.3710 (0.3604) grad_norm 315114.2188 (inf) mem 14543MB +[2023-10-11 20:37:35 simmim_pretrain](main_simmim.py 218): INFO Train: [87/200][3500/6787] eta 0:13:45 lr 0.000200 time 0.2503 (0.2511) loss 0.3631 (0.3606) grad_norm 272360.9375 (inf) mem 14543MB +[2023-10-11 20:39:40 simmim_pretrain](main_simmim.py 218): INFO Train: [87/200][4000/6787] eta 0:11:39 lr 0.000200 time 0.2483 (0.2510) loss 0.3713 (0.3605) grad_norm 260816.7656 (inf) mem 14543MB +[2023-10-11 20:41:45 simmim_pretrain](main_simmim.py 218): INFO Train: [87/200][4500/6787] eta 0:09:33 lr 0.000200 time 0.2463 (0.2509) loss 0.3785 (0.3607) grad_norm 167021.7188 (inf) mem 14543MB +[2023-10-11 20:43:50 simmim_pretrain](main_simmim.py 218): INFO Train: [87/200][5000/6787] eta 0:07:28 lr 0.000200 time 0.2466 (0.2508) loss 0.3553 (0.3609) grad_norm 387843.6875 (inf) mem 14543MB +[2023-10-11 20:45:55 simmim_pretrain](main_simmim.py 218): INFO Train: [87/200][5500/6787] eta 0:05:22 lr 0.000200 time 0.2468 (0.2508) loss 0.3530 (0.3609) grad_norm 240784.5781 (inf) mem 14543MB +[2023-10-11 20:48:00 simmim_pretrain](main_simmim.py 218): INFO Train: [87/200][6000/6787] eta 0:03:17 lr 0.000200 time 0.2526 (0.2508) loss 0.3655 (0.3608) grad_norm 350282.4375 (inf) mem 14543MB +[2023-10-11 20:50:06 simmim_pretrain](main_simmim.py 218): INFO Train: [87/200][6500/6787] eta 0:01:11 lr 0.000200 time 0.2460 (0.2507) loss 0.3839 (0.3610) grad_norm 172207.4375 (inf) mem 14543MB +[2023-10-11 20:51:18 simmim_pretrain](main_simmim.py 228): INFO EPOCH 87 training takes 0:28:22 +[2023-10-11 20:51:20 simmim_pretrain](main_simmim.py 218): INFO Train: [88/200][0/6787] eta 2:59:01 lr 0.000200 time 1.5827 (1.5827) loss 0.3547 (0.3547) grad_norm 227045.6562 (227045.6562) mem 14543MB +[2023-10-11 20:53:25 simmim_pretrain](main_simmim.py 218): INFO Train: [88/200][500/6787] eta 0:26:28 lr 0.000200 time 0.2552 (0.2527) loss 0.3458 (0.3622) grad_norm 160340.3438 (257354.3281) mem 14543MB +[2023-10-11 20:55:30 simmim_pretrain](main_simmim.py 218): INFO Train: [88/200][1000/6787] eta 0:24:16 lr 0.000200 time 0.2488 (0.2517) loss 0.3636 (0.3614) grad_norm 298845.7812 (259312.7812) mem 14543MB +[2023-10-11 20:57:35 simmim_pretrain](main_simmim.py 218): INFO Train: [88/200][1500/6787] eta 0:22:07 lr 0.000200 time 0.2520 (0.2512) loss 0.4119 (0.3970) grad_norm 56660.4883 (inf) mem 14543MB +[2023-10-11 20:59:40 simmim_pretrain](main_simmim.py 218): INFO Train: [88/200][2000/6787] eta 0:20:01 lr 0.000200 time 0.2495 (0.2511) loss 0.3587 (0.3927) grad_norm 34838.4766 (inf) mem 14543MB 
+[2023-10-11 21:01:46 simmim_pretrain](main_simmim.py 218): INFO Train: [88/200][2500/6787] eta 0:17:56 lr 0.000200 time 0.2513 (0.2510) loss 0.3521 (0.3880) grad_norm 30103.2676 (inf) mem 14543MB +[2023-10-11 21:03:51 simmim_pretrain](main_simmim.py 218): INFO Train: [88/200][3000/6787] eta 0:15:50 lr 0.000200 time 0.2517 (0.2510) loss 0.3868 (0.3848) grad_norm 24777.1641 (inf) mem 14543MB +[2023-10-11 21:05:57 simmim_pretrain](main_simmim.py 218): INFO Train: [88/200][3500/6787] eta 0:13:45 lr 0.000200 time 0.2518 (0.2510) loss 0.3731 (0.3819) grad_norm 57457.0273 (inf) mem 14543MB +[2023-10-11 21:08:02 simmim_pretrain](main_simmim.py 218): INFO Train: [88/200][4000/6787] eta 0:11:39 lr 0.000200 time 0.2495 (0.2510) loss 0.3543 (0.3798) grad_norm 33334.3750 (inf) mem 14543MB +[2023-10-11 21:10:08 simmim_pretrain](main_simmim.py 218): INFO Train: [88/200][4500/6787] eta 0:09:34 lr 0.000200 time 0.2524 (0.2510) loss 0.3466 (0.3780) grad_norm 49983.0938 (inf) mem 14543MB +[2023-10-11 21:12:13 simmim_pretrain](main_simmim.py 218): INFO Train: [88/200][5000/6787] eta 0:07:28 lr 0.000200 time 0.2505 (0.2510) loss 0.3361 (0.3767) grad_norm 35029.6875 (inf) mem 14543MB +[2023-10-11 21:14:18 simmim_pretrain](main_simmim.py 218): INFO Train: [88/200][5500/6787] eta 0:05:22 lr 0.000200 time 0.2463 (0.2509) loss 0.3874 (0.3754) grad_norm 62630.1875 (inf) mem 14543MB +[2023-10-11 21:16:23 simmim_pretrain](main_simmim.py 218): INFO Train: [88/200][6000/6787] eta 0:03:17 lr 0.000200 time 0.2557 (0.2508) loss 0.3653 (0.3743) grad_norm 85851.2734 (inf) mem 14543MB +[2023-10-11 21:18:28 simmim_pretrain](main_simmim.py 218): INFO Train: [88/200][6500/6787] eta 0:01:11 lr 0.000200 time 0.2517 (0.2508) loss 0.3529 (0.3734) grad_norm 68912.6094 (inf) mem 14543MB +[2023-10-11 21:19:40 simmim_pretrain](main_simmim.py 228): INFO EPOCH 88 training takes 0:28:22 +[2023-10-11 21:19:42 simmim_pretrain](main_simmim.py 218): INFO Train: [89/200][0/6787] eta 2:50:25 lr 0.000200 time 1.5067 (1.5067) loss 0.3623 (0.3623) grad_norm 91225.9297 (91225.9297) mem 14543MB +[2023-10-11 21:21:47 simmim_pretrain](main_simmim.py 218): INFO Train: [89/200][500/6787] eta 0:26:25 lr 0.000200 time 0.2455 (0.2522) loss 0.3705 (0.3607) grad_norm 68872.0234 (99398.5938) mem 14543MB +[2023-10-11 21:23:52 simmim_pretrain](main_simmim.py 218): INFO Train: [89/200][1000/6787] eta 0:24:14 lr 0.000200 time 0.2544 (0.2513) loss 0.3696 (0.3612) grad_norm 98099.3047 (115776.9297) mem 14543MB +[2023-10-11 21:25:57 simmim_pretrain](main_simmim.py 218): INFO Train: [89/200][1500/6787] eta 0:22:07 lr 0.000200 time 0.2497 (0.2510) loss 0.3465 (0.3612) grad_norm 196062.1094 (136350.5938) mem 14543MB +[2023-10-11 21:28:03 simmim_pretrain](main_simmim.py 218): INFO Train: [89/200][2000/6787] eta 0:20:01 lr 0.000200 time 0.2492 (0.2510) loss 0.3600 (0.3610) grad_norm 161352.8125 (143979.5469) mem 14543MB +[2023-10-11 21:30:08 simmim_pretrain](main_simmim.py 218): INFO Train: [89/200][2500/6787] eta 0:17:55 lr 0.000200 time 0.2591 (0.2510) loss 0.3673 (0.3609) grad_norm 251705.7188 (155916.9219) mem 14543MB +[2023-10-11 21:32:14 simmim_pretrain](main_simmim.py 218): INFO Train: [89/200][3000/6787] eta 0:15:50 lr 0.000200 time 0.2528 (0.2510) loss 0.3481 (0.3606) grad_norm 197844.5938 (175195.7656) mem 14543MB +[2023-10-11 21:34:19 simmim_pretrain](main_simmim.py 218): INFO Train: [89/200][3500/6787] eta 0:13:45 lr 0.000200 time 0.2502 (0.2510) loss 0.3600 (0.3604) grad_norm 647859.2500 (195491.0781) mem 14543MB +[2023-10-11 21:36:25 
simmim_pretrain](main_simmim.py 218): INFO Train: [89/200][4000/6787] eta 0:11:39 lr 0.000200 time 0.2488 (0.2511) loss 0.3541 (0.3603) grad_norm 447311.7812 (229069.8125) mem 14543MB +[2023-10-11 21:38:31 simmim_pretrain](main_simmim.py 218): INFO Train: [89/200][4500/6787] eta 0:09:34 lr 0.000200 time 0.2518 (0.2511) loss 0.3644 (0.3602) grad_norm 410143.4375 (inf) mem 14543MB +[2023-10-11 21:40:37 simmim_pretrain](main_simmim.py 218): INFO Train: [89/200][5000/6787] eta 0:07:28 lr 0.000200 time 0.2484 (0.2512) loss 0.3442 (0.3601) grad_norm 408016.3438 (inf) mem 14543MB +[2023-10-11 21:42:41 simmim_pretrain](main_simmim.py 218): INFO Train: [89/200][5500/6787] eta 0:05:23 lr 0.000200 time 0.2463 (0.2510) loss 0.3622 (0.3600) grad_norm 446225.4688 (inf) mem 14543MB +[2023-10-11 21:44:44 simmim_pretrain](main_simmim.py 218): INFO Train: [89/200][6000/6787] eta 0:03:17 lr 0.000200 time 0.2455 (0.2506) loss 0.3564 (0.3599) grad_norm 261182.1094 (inf) mem 14543MB +[2023-10-11 21:46:47 simmim_pretrain](main_simmim.py 218): INFO Train: [89/200][6500/6787] eta 0:01:11 lr 0.000200 time 0.2442 (0.2502) loss 0.3668 (0.3598) grad_norm 386898.7188 (inf) mem 14543MB +[2023-10-11 21:47:58 simmim_pretrain](main_simmim.py 228): INFO EPOCH 89 training takes 0:28:17 +[2023-10-11 21:47:59 simmim_pretrain](main_simmim.py 218): INFO Train: [90/200][0/6787] eta 2:22:38 lr 0.000200 time 1.2610 (1.2610) loss 0.3760 (0.3760) grad_norm 170557.5000 (170557.5000) mem 14543MB +[2023-10-11 21:50:01 simmim_pretrain](main_simmim.py 218): INFO Train: [90/200][500/6787] eta 0:25:48 lr 0.000200 time 0.2442 (0.2463) loss 0.3660 (0.3600) grad_norm 168590.0938 (240454.4844) mem 14543MB +[2023-10-11 21:52:03 simmim_pretrain](main_simmim.py 218): INFO Train: [90/200][1000/6787] eta 0:23:38 lr 0.000200 time 0.2438 (0.2452) loss 0.3698 (0.3605) grad_norm 240785.2344 (231088.2344) mem 14543MB +[2023-10-11 21:54:05 simmim_pretrain](main_simmim.py 218): INFO Train: [90/200][1500/6787] eta 0:21:34 lr 0.000200 time 0.2437 (0.2448) loss 0.3605 (0.3607) grad_norm 224320.6719 (225170.1719) mem 14543MB +[2023-10-11 21:56:07 simmim_pretrain](main_simmim.py 218): INFO Train: [90/200][2000/6787] eta 0:19:31 lr 0.000200 time 0.2442 (0.2446) loss 0.3573 (0.3609) grad_norm 482738.2812 (220991.0312) mem 14543MB +[2023-10-11 21:58:09 simmim_pretrain](main_simmim.py 218): INFO Train: [90/200][2500/6787] eta 0:17:28 lr 0.000200 time 0.2442 (0.2445) loss 0.3836 (0.3607) grad_norm 305034.9062 (237426.5625) mem 14543MB +[2023-10-11 22:00:11 simmim_pretrain](main_simmim.py 218): INFO Train: [90/200][3000/6787] eta 0:15:25 lr 0.000200 time 0.2436 (0.2444) loss 0.3755 (0.3606) grad_norm 305304.8750 (254154.1406) mem 14543MB +[2023-10-11 22:02:13 simmim_pretrain](main_simmim.py 218): INFO Train: [90/200][3500/6787] eta 0:13:23 lr 0.000200 time 0.2437 (0.2444) loss 0.3746 (0.3604) grad_norm 227178.0000 (280693.0938) mem 14543MB +[2023-10-11 22:04:15 simmim_pretrain](main_simmim.py 218): INFO Train: [90/200][4000/6787] eta 0:11:20 lr 0.000200 time 0.2437 (0.2443) loss 0.3537 (0.3604) grad_norm 447759.7500 (287622.4688) mem 14543MB +[2023-10-11 22:06:17 simmim_pretrain](main_simmim.py 218): INFO Train: [90/200][4500/6787] eta 0:09:18 lr 0.000200 time 0.2442 (0.2443) loss 0.3510 (0.3602) grad_norm 285604.6250 (inf) mem 14543MB +[2023-10-11 22:08:19 simmim_pretrain](main_simmim.py 218): INFO Train: [90/200][5000/6787] eta 0:07:16 lr 0.000200 time 0.2443 (0.2443) loss 0.3486 (0.3604) grad_norm 331259.1875 (inf) mem 14543MB +[2023-10-11 22:10:21 
simmim_pretrain](main_simmim.py 218): INFO Train: [90/200][5500/6787] eta 0:05:14 lr 0.000200 time 0.2439 (0.2442) loss 0.3699 (0.3604) grad_norm 276170.1250 (inf) mem 14543MB +[2023-10-11 22:12:23 simmim_pretrain](main_simmim.py 218): INFO Train: [90/200][6000/6787] eta 0:03:12 lr 0.000200 time 0.2440 (0.2442) loss 0.3365 (0.3603) grad_norm 278261.8750 (inf) mem 14543MB +[2023-10-11 22:14:25 simmim_pretrain](main_simmim.py 218): INFO Train: [90/200][6500/6787] eta 0:01:10 lr 0.000200 time 0.2441 (0.2442) loss 0.3590 (0.3604) grad_norm 334744.2812 (inf) mem 14543MB +[2023-10-11 22:15:36 simmim_pretrain](main_simmim.py 228): INFO EPOCH 90 training takes 0:27:38 +[2023-10-11 22:15:37 simmim_pretrain](main_simmim.py 218): INFO Train: [91/200][0/6787] eta 2:23:35 lr 0.000200 time 1.2693 (1.2693) loss 0.3623 (0.3623) grad_norm 191644.7969 (191644.7969) mem 14543MB +[2023-10-11 22:17:39 simmim_pretrain](main_simmim.py 218): INFO Train: [91/200][500/6787] eta 0:25:47 lr 0.000200 time 0.2442 (0.2462) loss 0.3624 (0.3594) grad_norm 487173.9375 (310834.7812) mem 14543MB +[2023-10-11 22:19:41 simmim_pretrain](main_simmim.py 218): INFO Train: [91/200][1000/6787] eta 0:23:38 lr 0.000200 time 0.2443 (0.2451) loss 0.3755 (0.3593) grad_norm 414266.3750 (338198.0625) mem 14543MB +[2023-10-11 22:21:43 simmim_pretrain](main_simmim.py 218): INFO Train: [91/200][1500/6787] eta 0:21:34 lr 0.000200 time 0.2440 (0.2448) loss 0.3474 (0.3596) grad_norm 473233.6562 (340883.9062) mem 14543MB +[2023-10-11 22:23:45 simmim_pretrain](main_simmim.py 218): INFO Train: [91/200][2000/6787] eta 0:19:30 lr 0.000200 time 0.2442 (0.2446) loss 0.3799 (0.3592) grad_norm 416249.3750 (inf) mem 14543MB +[2023-10-11 22:25:47 simmim_pretrain](main_simmim.py 218): INFO Train: [91/200][2500/6787] eta 0:17:28 lr 0.000200 time 0.2443 (0.2445) loss 0.3519 (0.3591) grad_norm 516312.1562 (inf) mem 14543MB +[2023-10-11 22:27:49 simmim_pretrain](main_simmim.py 218): INFO Train: [91/200][3000/6787] eta 0:15:25 lr 0.000200 time 0.2444 (0.2444) loss 0.3723 (0.3593) grad_norm 633295.6875 (inf) mem 14543MB +[2023-10-11 22:29:52 simmim_pretrain](main_simmim.py 218): INFO Train: [91/200][3500/6787] eta 0:13:23 lr 0.000200 time 0.2441 (0.2444) loss 0.3532 (0.3594) grad_norm 445285.3125 (inf) mem 14543MB +[2023-10-11 22:31:54 simmim_pretrain](main_simmim.py 218): INFO Train: [91/200][4000/6787] eta 0:11:20 lr 0.000200 time 0.2438 (0.2443) loss 0.3451 (0.3594) grad_norm 415215.3438 (inf) mem 14543MB +[2023-10-11 22:33:56 simmim_pretrain](main_simmim.py 218): INFO Train: [91/200][4500/6787] eta 0:09:18 lr 0.000200 time 0.2444 (0.2443) loss 0.3935 (0.3597) grad_norm 307076.5625 (inf) mem 14543MB +[2023-10-11 22:35:58 simmim_pretrain](main_simmim.py 218): INFO Train: [91/200][5000/6787] eta 0:07:16 lr 0.000200 time 0.2442 (0.2443) loss 0.3607 (0.3598) grad_norm 282594.4375 (inf) mem 14543MB +[2023-10-11 22:38:00 simmim_pretrain](main_simmim.py 218): INFO Train: [91/200][5500/6787] eta 0:05:14 lr 0.000200 time 0.2437 (0.2442) loss 0.3528 (0.3600) grad_norm 171084.9219 (inf) mem 14543MB +[2023-10-11 22:40:02 simmim_pretrain](main_simmim.py 218): INFO Train: [91/200][6000/6787] eta 0:03:12 lr 0.000200 time 0.2445 (0.2442) loss 0.3498 (0.3601) grad_norm 258422.8750 (inf) mem 14543MB +[2023-10-11 22:42:04 simmim_pretrain](main_simmim.py 218): INFO Train: [91/200][6500/6787] eta 0:01:10 lr 0.000200 time 0.2439 (0.2442) loss 0.3478 (0.3601) grad_norm 315594.7500 (inf) mem 14543MB +[2023-10-11 22:43:14 simmim_pretrain](main_simmim.py 228): INFO EPOCH 91 training 
takes 0:27:38 +[2023-10-11 22:43:15 simmim_pretrain](main_simmim.py 218): INFO Train: [92/200][0/6787] eta 2:23:13 lr 0.000200 time 1.2662 (1.2662) loss 0.3728 (0.3728) grad_norm 404795.1562 (404795.1562) mem 14543MB +[2023-10-11 22:45:18 simmim_pretrain](main_simmim.py 218): INFO Train: [92/200][500/6787] eta 0:25:47 lr 0.000200 time 0.2442 (0.2462) loss 0.3567 (0.3595) grad_norm 295807.7500 (349724.4688) mem 14543MB +[2023-10-11 22:47:20 simmim_pretrain](main_simmim.py 218): INFO Train: [92/200][1000/6787] eta 0:23:38 lr 0.000200 time 0.2437 (0.2451) loss 0.3405 (0.3593) grad_norm 251234.4375 (364011.5000) mem 14543MB +[2023-10-11 22:49:22 simmim_pretrain](main_simmim.py 218): INFO Train: [92/200][1500/6787] eta 0:21:33 lr 0.000200 time 0.2441 (0.2447) loss 0.3476 (0.3590) grad_norm 234071.4375 (inf) mem 14543MB +[2023-10-11 22:51:24 simmim_pretrain](main_simmim.py 218): INFO Train: [92/200][2000/6787] eta 0:19:30 lr 0.000200 time 0.2444 (0.2446) loss 0.3648 (0.3591) grad_norm 249882.7656 (inf) mem 14543MB +[2023-10-11 22:53:26 simmim_pretrain](main_simmim.py 218): INFO Train: [92/200][2500/6787] eta 0:17:28 lr 0.000200 time 0.2438 (0.2445) loss 0.3571 (0.3595) grad_norm 260261.1719 (inf) mem 14543MB +[2023-10-11 22:55:28 simmim_pretrain](main_simmim.py 218): INFO Train: [92/200][3000/6787] eta 0:15:25 lr 0.000200 time 0.2444 (0.2444) loss 0.3584 (0.3596) grad_norm 249233.6406 (inf) mem 14543MB +[2023-10-11 22:57:30 simmim_pretrain](main_simmim.py 218): INFO Train: [92/200][3500/6787] eta 0:13:23 lr 0.000200 time 0.2437 (0.2443) loss 0.3658 (0.3597) grad_norm 335225.8750 (inf) mem 14543MB +[2023-10-11 22:59:32 simmim_pretrain](main_simmim.py 218): INFO Train: [92/200][4000/6787] eta 0:11:20 lr 0.000200 time 0.2437 (0.2443) loss 0.3524 (0.3596) grad_norm 342623.4375 (inf) mem 14543MB +[2023-10-11 23:01:34 simmim_pretrain](main_simmim.py 218): INFO Train: [92/200][4500/6787] eta 0:09:18 lr 0.000200 time 0.2437 (0.2443) loss 0.3663 (0.3595) grad_norm 245564.4375 (inf) mem 14543MB +[2023-10-11 23:03:36 simmim_pretrain](main_simmim.py 218): INFO Train: [92/200][5000/6787] eta 0:07:16 lr 0.000200 time 0.2437 (0.2442) loss 0.3655 (0.3595) grad_norm 225061.4688 (inf) mem 14543MB +[2023-10-11 23:05:38 simmim_pretrain](main_simmim.py 218): INFO Train: [92/200][5500/6787] eta 0:05:14 lr 0.000200 time 0.2439 (0.2442) loss 0.3683 (0.3598) grad_norm 179358.0625 (inf) mem 14543MB +[2023-10-11 23:07:40 simmim_pretrain](main_simmim.py 218): INFO Train: [92/200][6000/6787] eta 0:03:12 lr 0.000200 time 0.2442 (0.2442) loss 0.3419 (0.3601) grad_norm 158925.0312 (inf) mem 14543MB +[2023-10-11 23:09:42 simmim_pretrain](main_simmim.py 218): INFO Train: [92/200][6500/6787] eta 0:01:10 lr 0.000200 time 0.2441 (0.2442) loss 0.3405 (0.3604) grad_norm 106705.7812 (inf) mem 14543MB +[2023-10-11 23:10:52 simmim_pretrain](main_simmim.py 228): INFO EPOCH 92 training takes 0:27:37 +[2023-10-11 23:10:53 simmim_pretrain](main_simmim.py 218): INFO Train: [93/200][0/6787] eta 2:25:57 lr 0.000200 time 1.2904 (1.2904) loss 0.3629 (0.3629) grad_norm 127502.2109 (127502.2109) mem 14543MB +[2023-10-11 23:12:55 simmim_pretrain](main_simmim.py 218): INFO Train: [93/200][500/6787] eta 0:25:47 lr 0.000200 time 0.2440 (0.2461) loss 0.3568 (0.3615) grad_norm 104398.9219 (139989.8125) mem 14543MB +[2023-10-11 23:14:58 simmim_pretrain](main_simmim.py 218): INFO Train: [93/200][1000/6787] eta 0:23:38 lr 0.000200 time 0.2438 (0.2451) loss 0.3605 (0.3616) grad_norm 172516.2344 (156020.9219) mem 14543MB +[2023-10-11 23:17:00 
simmim_pretrain](main_simmim.py 218): INFO Train: [93/200][1500/6787] eta 0:21:35 lr 0.000200 time 0.2511 (0.2451) loss 0.3330 (0.3615) grad_norm 134507.4219 (169800.7969) mem 14543MB +[2023-10-11 23:19:04 simmim_pretrain](main_simmim.py 218): INFO Train: [93/200][2000/6787] eta 0:19:35 lr 0.000200 time 0.2500 (0.2457) loss 0.3781 (0.3612) grad_norm 489067.6562 (177929.1562) mem 14543MB +[2023-10-11 23:21:09 simmim_pretrain](main_simmim.py 218): INFO Train: [93/200][2500/6787] eta 0:17:36 lr 0.000200 time 0.2508 (0.2465) loss 0.3498 (0.3610) grad_norm 274301.0625 (192479.8750) mem 14543MB +[2023-10-11 23:23:14 simmim_pretrain](main_simmim.py 218): INFO Train: [93/200][3000/6787] eta 0:15:35 lr 0.000200 time 0.2497 (0.2471) loss 0.3493 (0.3605) grad_norm 194772.6875 (209354.0156) mem 14543MB +[2023-10-11 23:25:19 simmim_pretrain](main_simmim.py 218): INFO Train: [93/200][3500/6787] eta 0:13:33 lr 0.000200 time 0.2512 (0.2476) loss 0.3668 (0.3604) grad_norm 350475.1875 (231430.6406) mem 14543MB +[2023-10-11 23:27:25 simmim_pretrain](main_simmim.py 218): INFO Train: [93/200][4000/6787] eta 0:11:31 lr 0.000200 time 0.2496 (0.2480) loss 0.3527 (0.3601) grad_norm 927230.5625 (250409.8125) mem 14543MB +[2023-10-11 23:29:30 simmim_pretrain](main_simmim.py 218): INFO Train: [93/200][4500/6787] eta 0:09:27 lr 0.000200 time 0.2495 (0.2483) loss 0.3624 (0.3599) grad_norm 475389.3438 (inf) mem 14543MB +[2023-10-11 23:31:36 simmim_pretrain](main_simmim.py 218): INFO Train: [93/200][5000/6787] eta 0:07:24 lr 0.000200 time 0.2547 (0.2487) loss 0.3646 (0.3599) grad_norm 502091.1875 (inf) mem 14543MB +[2023-10-11 23:33:42 simmim_pretrain](main_simmim.py 218): INFO Train: [93/200][5500/6787] eta 0:05:20 lr 0.000200 time 0.2567 (0.2491) loss 0.3861 (0.3598) grad_norm 338733.1875 (inf) mem 14543MB +[2023-10-11 23:35:50 simmim_pretrain](main_simmim.py 218): INFO Train: [93/200][6000/6787] eta 0:03:16 lr 0.000200 time 0.2562 (0.2497) loss 0.3481 (0.3598) grad_norm 450744.2500 (inf) mem 14543MB +[2023-10-11 23:37:59 simmim_pretrain](main_simmim.py 218): INFO Train: [93/200][6500/6787] eta 0:01:11 lr 0.000200 time 0.2571 (0.2502) loss 0.3701 (0.3599) grad_norm 252458.0781 (inf) mem 14543MB +[2023-10-11 23:39:13 simmim_pretrain](main_simmim.py 228): INFO EPOCH 93 training takes 0:28:20 +[2023-10-11 23:39:14 simmim_pretrain](main_simmim.py 218): INFO Train: [94/200][0/6787] eta 2:33:10 lr 0.000200 time 1.3542 (1.3542) loss 0.3598 (0.3598) grad_norm 195948.2500 (195948.2500) mem 14543MB +[2023-10-11 23:41:19 simmim_pretrain](main_simmim.py 218): INFO Train: [94/200][500/6787] eta 0:26:30 lr 0.000200 time 0.2515 (0.2530) loss 0.3767 (0.3613) grad_norm 239986.7188 (278015.4688) mem 14543MB +[2023-10-11 23:43:25 simmim_pretrain](main_simmim.py 218): INFO Train: [94/200][1000/6787] eta 0:24:20 lr 0.000200 time 0.2534 (0.2523) loss 0.3650 (0.3616) grad_norm 243425.3594 (254542.3594) mem 14543MB +[2023-10-11 23:45:31 simmim_pretrain](main_simmim.py 218): INFO Train: [94/200][1500/6787] eta 0:22:12 lr 0.000200 time 0.2472 (0.2521) loss 0.3584 (0.3610) grad_norm 338258.1250 (259721.9219) mem 14543MB +[2023-10-11 23:47:37 simmim_pretrain](main_simmim.py 218): INFO Train: [94/200][2000/6787] eta 0:20:05 lr 0.000200 time 0.2514 (0.2519) loss 0.3427 (0.3605) grad_norm 242770.8281 (276487.8438) mem 14543MB +[2023-10-11 23:49:42 simmim_pretrain](main_simmim.py 218): INFO Train: [94/200][2500/6787] eta 0:17:59 lr 0.000200 time 0.2465 (0.2517) loss 0.3694 (0.3604) grad_norm 280177.0000 (inf) mem 14543MB +[2023-10-11 23:51:48 
simmim_pretrain](main_simmim.py 218): INFO Train: [94/200][3000/6787] eta 0:15:52 lr 0.000200 time 0.2500 (0.2516) loss 0.3510 (0.3604) grad_norm 322366.2188 (inf) mem 14543MB +[2023-10-11 23:53:53 simmim_pretrain](main_simmim.py 218): INFO Train: [94/200][3500/6787] eta 0:13:46 lr 0.000200 time 0.2501 (0.2515) loss 0.3643 (0.3605) grad_norm 267451.7500 (inf) mem 14543MB +[2023-10-11 23:55:59 simmim_pretrain](main_simmim.py 218): INFO Train: [94/200][4000/6787] eta 0:11:40 lr 0.000200 time 0.2521 (0.2514) loss 0.3388 (0.3605) grad_norm 349595.1250 (inf) mem 14543MB +[2023-10-11 23:58:04 simmim_pretrain](main_simmim.py 218): INFO Train: [94/200][4500/6787] eta 0:09:34 lr 0.000200 time 0.2538 (0.2514) loss 0.3665 (0.3605) grad_norm 487705.5938 (inf) mem 14543MB +[2023-10-12 00:00:10 simmim_pretrain](main_simmim.py 218): INFO Train: [94/200][5000/6787] eta 0:07:29 lr 0.000200 time 0.2485 (0.2513) loss 0.3733 (0.3604) grad_norm 241284.5156 (inf) mem 14543MB +[2023-10-12 00:02:15 simmim_pretrain](main_simmim.py 218): INFO Train: [94/200][5500/6787] eta 0:05:23 lr 0.000200 time 0.2593 (0.2513) loss 0.3442 (0.3603) grad_norm 681812.5625 (inf) mem 14543MB +[2023-10-12 00:04:21 simmim_pretrain](main_simmim.py 218): INFO Train: [94/200][6000/6787] eta 0:03:17 lr 0.000200 time 0.2503 (0.2513) loss 0.3487 (0.3602) grad_norm 261522.1719 (inf) mem 14543MB +[2023-10-12 00:06:26 simmim_pretrain](main_simmim.py 218): INFO Train: [94/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2518 (0.2512) loss 0.3639 (0.3602) grad_norm 314174.4688 (inf) mem 14543MB +[2023-10-12 00:07:38 simmim_pretrain](main_simmim.py 228): INFO EPOCH 94 training takes 0:28:25 +[2023-10-12 00:07:40 simmim_pretrain](main_simmim.py 218): INFO Train: [95/200][0/6787] eta 3:01:26 lr 0.000200 time 1.6040 (1.6040) loss 0.3612 (0.3612) grad_norm 330066.4688 (330066.4688) mem 14543MB +[2023-10-12 00:09:45 simmim_pretrain](main_simmim.py 218): INFO Train: [95/200][500/6787] eta 0:26:30 lr 0.000200 time 0.2519 (0.2530) loss 0.3535 (0.3590) grad_norm 338333.1562 (inf) mem 14543MB +[2023-10-12 00:11:51 simmim_pretrain](main_simmim.py 218): INFO Train: [95/200][1000/6787] eta 0:24:17 lr 0.000200 time 0.2521 (0.2519) loss 0.3642 (0.3598) grad_norm 342719.0938 (inf) mem 14543MB +[2023-10-12 00:13:56 simmim_pretrain](main_simmim.py 218): INFO Train: [95/200][1500/6787] eta 0:22:10 lr 0.000200 time 0.2489 (0.2516) loss 0.3549 (0.3603) grad_norm 198139.5781 (inf) mem 14543MB +[2023-10-12 00:16:02 simmim_pretrain](main_simmim.py 218): INFO Train: [95/200][2000/6787] eta 0:20:03 lr 0.000200 time 0.2553 (0.2514) loss 0.3507 (0.3603) grad_norm 303251.9062 (inf) mem 14543MB +[2023-10-12 00:18:07 simmim_pretrain](main_simmim.py 218): INFO Train: [95/200][2500/6787] eta 0:17:57 lr 0.000200 time 0.2485 (0.2513) loss 0.3645 (0.3604) grad_norm 269514.3125 (inf) mem 14543MB +[2023-10-12 00:20:12 simmim_pretrain](main_simmim.py 218): INFO Train: [95/200][3000/6787] eta 0:15:51 lr 0.000200 time 0.2492 (0.2512) loss 0.3571 (0.3603) grad_norm 459835.5938 (inf) mem 14543MB +[2023-10-12 00:22:18 simmim_pretrain](main_simmim.py 218): INFO Train: [95/200][3500/6787] eta 0:13:45 lr 0.000200 time 0.2531 (0.2512) loss 0.3537 (0.3601) grad_norm 290855.5625 (inf) mem 14543MB +[2023-10-12 00:24:23 simmim_pretrain](main_simmim.py 218): INFO Train: [95/200][4000/6787] eta 0:11:40 lr 0.000200 time 0.2593 (0.2512) loss 0.3527 (0.3600) grad_norm 662410.2500 (inf) mem 14543MB +[2023-10-12 00:26:29 simmim_pretrain](main_simmim.py 218): INFO Train: [95/200][4500/6787] eta 0:09:34 lr 
0.000200 time 0.2458 (0.2512) loss 0.3524 (0.3602) grad_norm 312092.1250 (inf) mem 14543MB +[2023-10-12 00:28:34 simmim_pretrain](main_simmim.py 218): INFO Train: [95/200][5000/6787] eta 0:07:28 lr 0.000200 time 0.2584 (0.2511) loss 0.3704 (0.3602) grad_norm 263137.7812 (inf) mem 14543MB +[2023-10-12 00:30:40 simmim_pretrain](main_simmim.py 218): INFO Train: [95/200][5500/6787] eta 0:05:23 lr 0.000200 time 0.2488 (0.2511) loss 0.3380 (0.3602) grad_norm 217898.2188 (inf) mem 14543MB +[2023-10-12 00:32:45 simmim_pretrain](main_simmim.py 218): INFO Train: [95/200][6000/6787] eta 0:03:17 lr 0.000200 time 0.2481 (0.2511) loss 0.3524 (0.3602) grad_norm 187770.5312 (inf) mem 14543MB +[2023-10-12 00:34:51 simmim_pretrain](main_simmim.py 218): INFO Train: [95/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2498 (0.2511) loss 0.3603 (0.3602) grad_norm 239238.5156 (nan) mem 14543MB +[2023-10-12 00:36:03 simmim_pretrain](main_simmim.py 228): INFO EPOCH 95 training takes 0:28:24 +[2023-10-12 00:36:05 simmim_pretrain](main_simmim.py 218): INFO Train: [96/200][0/6787] eta 2:35:28 lr 0.000200 time 1.3745 (1.3745) loss 0.3515 (0.3515) grad_norm 338666.8125 (338666.8125) mem 14543MB +[2023-10-12 00:38:10 simmim_pretrain](main_simmim.py 218): INFO Train: [96/200][500/6787] eta 0:26:31 lr 0.000200 time 0.2483 (0.2532) loss 0.3654 (0.3613) grad_norm 269618.0312 (263035.4062) mem 14543MB +[2023-10-12 00:40:16 simmim_pretrain](main_simmim.py 218): INFO Train: [96/200][1000/6787] eta 0:24:19 lr 0.000200 time 0.2553 (0.2522) loss 0.3493 (0.3618) grad_norm 159350.4844 (inf) mem 14543MB +[2023-10-12 00:42:21 simmim_pretrain](main_simmim.py 218): INFO Train: [96/200][1500/6787] eta 0:22:11 lr 0.000200 time 0.2522 (0.2519) loss 0.3694 (0.3623) grad_norm 143001.4844 (inf) mem 14543MB +[2023-10-12 00:44:27 simmim_pretrain](main_simmim.py 218): INFO Train: [96/200][2000/6787] eta 0:20:05 lr 0.000200 time 0.2546 (0.2518) loss 0.3682 (0.3624) grad_norm 126866.6172 (inf) mem 14543MB +[2023-10-12 00:46:33 simmim_pretrain](main_simmim.py 218): INFO Train: [96/200][2500/6787] eta 0:17:59 lr 0.000200 time 0.2510 (0.2518) loss 0.3668 (0.3626) grad_norm 86701.1094 (inf) mem 14543MB +[2023-10-12 00:48:39 simmim_pretrain](main_simmim.py 218): INFO Train: [96/200][3000/6787] eta 0:15:53 lr 0.000200 time 0.2472 (0.2518) loss 0.3802 (0.3624) grad_norm 99419.3984 (inf) mem 14543MB +[2023-10-12 00:50:45 simmim_pretrain](main_simmim.py 218): INFO Train: [96/200][3500/6787] eta 0:13:47 lr 0.000200 time 0.2505 (0.2518) loss 0.3649 (0.3621) grad_norm 203786.1250 (inf) mem 14543MB +[2023-10-12 00:52:51 simmim_pretrain](main_simmim.py 218): INFO Train: [96/200][4000/6787] eta 0:11:41 lr 0.000200 time 0.2538 (0.2518) loss 0.3304 (0.3620) grad_norm 193049.5000 (inf) mem 14543MB +[2023-10-12 00:54:57 simmim_pretrain](main_simmim.py 218): INFO Train: [96/200][4500/6787] eta 0:09:35 lr 0.000200 time 0.2587 (0.2518) loss 0.3694 (0.3618) grad_norm 285247.9688 (inf) mem 14543MB +[2023-10-12 00:57:02 simmim_pretrain](main_simmim.py 218): INFO Train: [96/200][5000/6787] eta 0:07:29 lr 0.000200 time 0.2504 (0.2518) loss 0.3561 (0.3617) grad_norm 334870.4375 (inf) mem 14543MB +[2023-10-12 00:59:08 simmim_pretrain](main_simmim.py 218): INFO Train: [96/200][5500/6787] eta 0:05:23 lr 0.000200 time 0.2472 (0.2517) loss 0.3654 (0.3615) grad_norm 336901.1250 (inf) mem 14543MB +[2023-10-12 01:01:14 simmim_pretrain](main_simmim.py 218): INFO Train: [96/200][6000/6787] eta 0:03:18 lr 0.000200 time 0.2558 (0.2517) loss 0.3631 (0.3614) grad_norm 312311.0938 (inf) 
mem 14543MB +[2023-10-12 01:03:20 simmim_pretrain](main_simmim.py 218): INFO Train: [96/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2701 (0.2517) loss 0.3649 (0.3612) grad_norm 531862.7500 (inf) mem 14543MB +[2023-10-12 01:04:32 simmim_pretrain](main_simmim.py 228): INFO EPOCH 96 training takes 0:28:28 +[2023-10-12 01:04:34 simmim_pretrain](main_simmim.py 218): INFO Train: [97/200][0/6787] eta 2:53:17 lr 0.000200 time 1.5319 (1.5319) loss 0.3422 (0.3422) grad_norm 364519.9062 (364519.9062) mem 14543MB +[2023-10-12 01:06:39 simmim_pretrain](main_simmim.py 218): INFO Train: [97/200][500/6787] eta 0:26:33 lr 0.000200 time 0.2500 (0.2535) loss 0.3498 (0.3588) grad_norm 413153.1562 (inf) mem 14543MB +[2023-10-12 01:08:45 simmim_pretrain](main_simmim.py 218): INFO Train: [97/200][1000/6787] eta 0:24:20 lr 0.000200 time 0.2476 (0.2524) loss 0.3515 (0.3593) grad_norm 416699.2812 (inf) mem 14543MB +[2023-10-12 01:10:50 simmim_pretrain](main_simmim.py 218): INFO Train: [97/200][1500/6787] eta 0:22:12 lr 0.000200 time 0.2503 (0.2521) loss 0.3595 (0.3601) grad_norm 188335.2656 (inf) mem 14543MB +[2023-10-12 01:12:56 simmim_pretrain](main_simmim.py 218): INFO Train: [97/200][2000/6787] eta 0:20:05 lr 0.000200 time 0.2513 (0.2518) loss 0.3605 (0.3608) grad_norm 159172.7031 (inf) mem 14543MB +[2023-10-12 01:15:01 simmim_pretrain](main_simmim.py 218): INFO Train: [97/200][2500/6787] eta 0:17:58 lr 0.000200 time 0.2498 (0.2516) loss 0.3568 (0.3612) grad_norm 163809.4844 (inf) mem 14543MB +[2023-10-12 01:17:07 simmim_pretrain](main_simmim.py 218): INFO Train: [97/200][3000/6787] eta 0:15:52 lr 0.000200 time 0.2538 (0.2515) loss 0.3545 (0.3615) grad_norm 129363.6953 (inf) mem 14543MB +[2023-10-12 01:19:12 simmim_pretrain](main_simmim.py 218): INFO Train: [97/200][3500/6787] eta 0:13:46 lr 0.000200 time 0.2490 (0.2514) loss 0.3679 (0.3617) grad_norm 84584.1016 (inf) mem 14543MB +[2023-10-12 01:21:18 simmim_pretrain](main_simmim.py 218): INFO Train: [97/200][4000/6787] eta 0:11:40 lr 0.000200 time 0.2564 (0.2513) loss 0.3545 (0.3617) grad_norm 236204.7031 (inf) mem 14543MB +[2023-10-12 01:23:23 simmim_pretrain](main_simmim.py 218): INFO Train: [97/200][4500/6787] eta 0:09:34 lr 0.000200 time 0.2520 (0.2513) loss 0.3555 (0.3616) grad_norm 91257.7031 (inf) mem 14543MB +[2023-10-12 01:25:28 simmim_pretrain](main_simmim.py 218): INFO Train: [97/200][5000/6787] eta 0:07:28 lr 0.000200 time 0.2459 (0.2512) loss 0.3821 (0.3614) grad_norm 192299.5156 (inf) mem 14543MB +[2023-10-12 01:27:34 simmim_pretrain](main_simmim.py 218): INFO Train: [97/200][5500/6787] eta 0:05:23 lr 0.000200 time 0.2520 (0.2512) loss 0.3765 (0.3613) grad_norm 231435.8125 (inf) mem 14543MB +[2023-10-12 01:29:40 simmim_pretrain](main_simmim.py 218): INFO Train: [97/200][6000/6787] eta 0:03:17 lr 0.000200 time 0.2518 (0.2512) loss 0.3583 (0.3612) grad_norm 230838.4219 (inf) mem 14543MB +[2023-10-12 01:31:46 simmim_pretrain](main_simmim.py 218): INFO Train: [97/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2509 (0.2513) loss 0.3828 (0.3611) grad_norm 249152.5312 (inf) mem 14543MB +[2023-10-12 01:32:59 simmim_pretrain](main_simmim.py 228): INFO EPOCH 97 training takes 0:28:26 +[2023-10-12 01:33:00 simmim_pretrain](main_simmim.py 218): INFO Train: [98/200][0/6787] eta 2:45:34 lr 0.000200 time 1.4638 (1.4638) loss 0.3622 (0.3622) grad_norm 291276.5938 (291276.5938) mem 14543MB +[2023-10-12 01:35:06 simmim_pretrain](main_simmim.py 218): INFO Train: [98/200][500/6787] eta 0:26:32 lr 0.000200 time 0.2512 (0.2534) loss 0.3479 (0.3611) grad_norm 
338077.3750 (237961.6562) mem 14543MB +[2023-10-12 01:37:12 simmim_pretrain](main_simmim.py 218): INFO Train: [98/200][1000/6787] eta 0:24:22 lr 0.000200 time 0.2493 (0.2527) loss 0.3441 (0.3609) grad_norm 278081.3750 (243458.1406) mem 14543MB +[2023-10-12 01:39:18 simmim_pretrain](main_simmim.py 218): INFO Train: [98/200][1500/6787] eta 0:22:15 lr 0.000200 time 0.2590 (0.2526) loss 0.3558 (0.3603) grad_norm 653827.8125 (276654.0625) mem 14543MB +[2023-10-12 01:41:24 simmim_pretrain](main_simmim.py 218): INFO Train: [98/200][2000/6787] eta 0:20:08 lr 0.000200 time 0.2488 (0.2525) loss 0.3495 (0.3601) grad_norm 178403.3125 (inf) mem 14543MB +[2023-10-12 01:43:30 simmim_pretrain](main_simmim.py 218): INFO Train: [98/200][2500/6787] eta 0:18:02 lr 0.000200 time 0.2530 (0.2525) loss 0.3604 (0.3601) grad_norm 339051.9375 (inf) mem 14543MB +[2023-10-12 01:45:37 simmim_pretrain](main_simmim.py 218): INFO Train: [98/200][3000/6787] eta 0:15:56 lr 0.000200 time 0.2593 (0.2525) loss 0.3737 (0.3600) grad_norm 275548.2500 (inf) mem 14543MB +[2023-10-12 01:47:43 simmim_pretrain](main_simmim.py 218): INFO Train: [98/200][3500/6787] eta 0:13:50 lr 0.000200 time 0.2503 (0.2526) loss 0.3631 (0.3600) grad_norm 264094.0625 (inf) mem 14543MB +[2023-10-12 01:49:49 simmim_pretrain](main_simmim.py 218): INFO Train: [98/200][4000/6787] eta 0:11:43 lr 0.000200 time 0.2482 (0.2525) loss 0.3779 (0.3600) grad_norm 299249.4062 (inf) mem 14543MB +[2023-10-12 01:51:55 simmim_pretrain](main_simmim.py 218): INFO Train: [98/200][4500/6787] eta 0:09:37 lr 0.000200 time 0.2539 (0.2525) loss 0.3538 (0.3602) grad_norm 153941.1875 (nan) mem 14543MB +[2023-10-12 01:54:03 simmim_pretrain](main_simmim.py 218): INFO Train: [98/200][5000/6787] eta 0:07:31 lr 0.000200 time 0.2573 (0.2529) loss 0.3554 (0.3605) grad_norm 181709.2031 (nan) mem 14543MB +[2023-10-12 01:56:12 simmim_pretrain](main_simmim.py 218): INFO Train: [98/200][5500/6787] eta 0:05:25 lr 0.000200 time 0.2581 (0.2533) loss 0.3563 (0.3608) grad_norm 181319.8750 (nan) mem 14543MB +[2023-10-12 01:58:21 simmim_pretrain](main_simmim.py 218): INFO Train: [98/200][6000/6787] eta 0:03:19 lr 0.000200 time 0.2573 (0.2536) loss 0.3728 (0.3612) grad_norm 75110.2344 (nan) mem 14543MB +[2023-10-12 02:00:30 simmim_pretrain](main_simmim.py 218): INFO Train: [98/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2578 (0.2539) loss 0.3759 (0.3617) grad_norm 73465.4375 (nan) mem 14543MB +[2023-10-12 02:01:44 simmim_pretrain](main_simmim.py 228): INFO EPOCH 98 training takes 0:28:45 +[2023-10-12 02:01:45 simmim_pretrain](main_simmim.py 218): INFO Train: [99/200][0/6787] eta 2:55:50 lr 0.000200 time 1.5545 (1.5545) loss 0.3537 (0.3537) grad_norm 65805.4844 (65805.4844) mem 14543MB +[2023-10-12 02:03:51 simmim_pretrain](main_simmim.py 218): INFO Train: [99/200][500/6787] eta 0:26:31 lr 0.000200 time 0.2534 (0.2531) loss 0.3779 (0.3640) grad_norm 57297.2031 (64116.5977) mem 14543MB +[2023-10-12 02:05:57 simmim_pretrain](main_simmim.py 218): INFO Train: [99/200][1000/6787] eta 0:24:20 lr 0.000200 time 0.2533 (0.2524) loss 0.3790 (0.3651) grad_norm 70941.5391 (64241.7031) mem 14543MB +[2023-10-12 02:08:02 simmim_pretrain](main_simmim.py 218): INFO Train: [99/200][1500/6787] eta 0:22:13 lr 0.000200 time 0.2526 (0.2522) loss 0.3404 (0.3641) grad_norm 89075.2422 (69660.6641) mem 14543MB +[2023-10-12 02:10:08 simmim_pretrain](main_simmim.py 218): INFO Train: [99/200][2000/6787] eta 0:20:06 lr 0.000200 time 0.2496 (0.2521) loss 0.3739 (0.3636) grad_norm 75972.2344 (76073.1953) mem 14543MB +[2023-10-12 
02:12:14 simmim_pretrain](main_simmim.py 218): INFO Train: [99/200][2500/6787] eta 0:18:00 lr 0.000200 time 0.2523 (0.2520) loss 0.3731 (0.3634) grad_norm 62650.6758 (81671.0547) mem 14543MB +[2023-10-12 02:14:20 simmim_pretrain](main_simmim.py 218): INFO Train: [99/200][3000/6787] eta 0:15:54 lr 0.000200 time 0.2512 (0.2520) loss 0.3580 (0.3632) grad_norm 137453.0781 (85854.5703) mem 14543MB +[2023-10-12 02:16:26 simmim_pretrain](main_simmim.py 218): INFO Train: [99/200][3500/6787] eta 0:13:48 lr 0.000200 time 0.2500 (0.2520) loss 0.3592 (0.3628) grad_norm 276063.9062 (93401.7188) mem 14543MB +[2023-10-12 02:18:32 simmim_pretrain](main_simmim.py 218): INFO Train: [99/200][4000/6787] eta 0:11:42 lr 0.000200 time 0.2504 (0.2520) loss 0.3813 (0.3625) grad_norm 192551.4375 (103270.8281) mem 14543MB +[2023-10-12 02:20:38 simmim_pretrain](main_simmim.py 218): INFO Train: [99/200][4500/6787] eta 0:09:36 lr 0.000200 time 0.2540 (0.2520) loss 0.3612 (0.3623) grad_norm 157966.5938 (inf) mem 14543MB +[2023-10-12 02:22:44 simmim_pretrain](main_simmim.py 218): INFO Train: [99/200][5000/6787] eta 0:07:30 lr 0.000200 time 0.2591 (0.2520) loss 0.3608 (0.3623) grad_norm 126726.2969 (inf) mem 14543MB +[2023-10-12 02:24:50 simmim_pretrain](main_simmim.py 218): INFO Train: [99/200][5500/6787] eta 0:05:24 lr 0.000200 time 0.2490 (0.2520) loss 0.3756 (0.3622) grad_norm 124848.4609 (inf) mem 14543MB +[2023-10-12 02:26:57 simmim_pretrain](main_simmim.py 218): INFO Train: [99/200][6000/6787] eta 0:03:18 lr 0.000200 time 0.2599 (0.2521) loss 0.3770 (0.3623) grad_norm 144055.2500 (inf) mem 14543MB +[2023-10-12 02:29:03 simmim_pretrain](main_simmim.py 218): INFO Train: [99/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2597 (0.2521) loss 0.3378 (0.3622) grad_norm 181633.9844 (inf) mem 14543MB +[2023-10-12 02:30:16 simmim_pretrain](main_simmim.py 228): INFO EPOCH 99 training takes 0:28:31 +[2023-10-12 02:30:17 simmim_pretrain](main_simmim.py 218): INFO Train: [100/200][0/6787] eta 2:41:47 lr 0.000200 time 1.4303 (1.4303) loss 0.3415 (0.3415) grad_norm 178395.6406 (178395.6406) mem 14543MB +[2023-10-12 02:32:23 simmim_pretrain](main_simmim.py 218): INFO Train: [100/200][500/6787] eta 0:26:32 lr 0.000200 time 0.2483 (0.2533) loss 0.3766 (0.3603) grad_norm 277789.6875 (171691.6094) mem 14543MB +[2023-10-12 02:34:28 simmim_pretrain](main_simmim.py 218): INFO Train: [100/200][1000/6787] eta 0:24:20 lr 0.000200 time 0.2527 (0.2524) loss 0.3692 (0.3605) grad_norm 183919.0781 (181329.0156) mem 14543MB +[2023-10-12 02:36:34 simmim_pretrain](main_simmim.py 218): INFO Train: [100/200][1500/6787] eta 0:22:12 lr 0.000200 time 0.2463 (0.2520) loss 0.3559 (0.3600) grad_norm 154250.5781 (208835.6094) mem 14543MB +[2023-10-12 02:38:39 simmim_pretrain](main_simmim.py 218): INFO Train: [100/200][2000/6787] eta 0:20:05 lr 0.000200 time 0.2529 (0.2517) loss 0.3514 (0.3597) grad_norm 241654.4219 (221784.2344) mem 14543MB +[2023-10-12 02:40:45 simmim_pretrain](main_simmim.py 218): INFO Train: [100/200][2500/6787] eta 0:17:58 lr 0.000200 time 0.2483 (0.2516) loss 0.3692 (0.3592) grad_norm 243274.0000 (243056.1094) mem 14543MB +[2023-10-12 02:42:50 simmim_pretrain](main_simmim.py 218): INFO Train: [100/200][3000/6787] eta 0:15:52 lr 0.000200 time 0.2517 (0.2514) loss 0.3677 (0.3593) grad_norm 377185.2188 (255469.4688) mem 14543MB +[2023-10-12 02:44:56 simmim_pretrain](main_simmim.py 218): INFO Train: [100/200][3500/6787] eta 0:13:46 lr 0.000200 time 0.2503 (0.2514) loss 0.3582 (0.3592) grad_norm 296182.0938 (inf) mem 14543MB +[2023-10-12 
02:47:02 simmim_pretrain](main_simmim.py 218): INFO Train: [100/200][4000/6787] eta 0:11:40 lr 0.000200 time 0.2520 (0.2514) loss 0.3682 (0.3592) grad_norm 281465.1562 (inf) mem 14543MB
+[2023-10-12 02:58:47 simmim_pretrain](main_simmim.py 228): INFO EPOCH 100 training takes 0:28:31
+[2023-10-12 02:58:47 simmim_pretrain](utils.py 62): INFO /root/autodl-tmp/LSQ-simmim/checkpoint/simmim_pretrain/vit_simmim/ckpt_epoch_100.pth saving......
+[2023-10-12 02:58:48 simmim_pretrain](utils.py 64): INFO /root/autodl-tmp/LSQ-simmim/checkpoint/simmim_pretrain/vit_simmim/ckpt_epoch_100.pth saved !!!
+[... SimMIM pretraining log continues in the same per-iteration format for epochs 101-125 of 200 (6787 iterations per epoch, logged every 500 steps): lr fixed at 0.000200, ~0.25 s per iteration, ~28.5 min per epoch, mem 14543MB throughout; loss plateaus around 0.358-0.362, with brief spikes to ~0.5 at the start of epoch 105 and mid-epoch 120 that recover within the epoch; the running grad_norm average frequently overflows to (inf); ckpt_epoch_120.pth is saved after epoch 120 ...]
+[2023-10-12 14:46:57 
simmim_pretrain](main_simmim.py 218): INFO Train: [125/200][6000/6787] eta 0:03:18 lr 0.000200 time 0.2566 (0.2523) loss 0.3395 (0.3582) grad_norm 456096.5312 (inf) mem 14543MB +[2023-10-12 14:49:04 simmim_pretrain](main_simmim.py 218): INFO Train: [125/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2530 (0.2525) loss 0.3846 (0.3582) grad_norm 375934.6875 (inf) mem 14543MB +[2023-10-12 14:50:18 simmim_pretrain](main_simmim.py 228): INFO EPOCH 125 training takes 0:28:35 +[2023-10-12 14:50:20 simmim_pretrain](main_simmim.py 218): INFO Train: [126/200][0/6787] eta 2:57:01 lr 0.000200 time 1.5649 (1.5649) loss 0.3423 (0.3423) grad_norm 253777.9531 (253777.9531) mem 14543MB +[2023-10-12 14:52:29 simmim_pretrain](main_simmim.py 218): INFO Train: [126/200][500/6787] eta 0:27:17 lr 0.000200 time 0.2594 (0.2605) loss 0.3569 (0.3570) grad_norm 363599.9375 (430049.2812) mem 14543MB +[2023-10-12 14:54:38 simmim_pretrain](main_simmim.py 218): INFO Train: [126/200][1000/6787] eta 0:25:00 lr 0.000200 time 0.2592 (0.2592) loss 0.3401 (0.3577) grad_norm 390369.9375 (inf) mem 14543MB +[2023-10-12 14:56:46 simmim_pretrain](main_simmim.py 218): INFO Train: [126/200][1500/6787] eta 0:22:46 lr 0.000200 time 0.2496 (0.2585) loss 0.3606 (0.3579) grad_norm 323866.9062 (inf) mem 14543MB +[2023-10-12 14:58:55 simmim_pretrain](main_simmim.py 218): INFO Train: [126/200][2000/6787] eta 0:20:35 lr 0.000200 time 0.2532 (0.2581) loss 0.3431 (0.3581) grad_norm 89422.7266 (inf) mem 14543MB +[2023-10-12 15:01:02 simmim_pretrain](main_simmim.py 218): INFO Train: [126/200][2500/6787] eta 0:18:24 lr 0.000200 time 0.2542 (0.2576) loss 0.3604 (0.3583) grad_norm 316622.4375 (inf) mem 14543MB +[2023-10-12 15:03:10 simmim_pretrain](main_simmim.py 218): INFO Train: [126/200][3000/6787] eta 0:16:14 lr 0.000200 time 0.2533 (0.2572) loss 0.3762 (0.3583) grad_norm 241167.0469 (inf) mem 14543MB +[2023-10-12 15:05:18 simmim_pretrain](main_simmim.py 218): INFO Train: [126/200][3500/6787] eta 0:14:04 lr 0.000200 time 0.2591 (0.2570) loss 0.3535 (0.3583) grad_norm 584936.5000 (inf) mem 14543MB +[2023-10-12 15:07:27 simmim_pretrain](main_simmim.py 218): INFO Train: [126/200][4000/6787] eta 0:11:56 lr 0.000200 time 0.2611 (0.2572) loss 0.3591 (0.3582) grad_norm 597492.1250 (inf) mem 14543MB +[2023-10-12 15:09:36 simmim_pretrain](main_simmim.py 218): INFO Train: [126/200][4500/6787] eta 0:09:48 lr 0.000200 time 0.2560 (0.2572) loss 0.3579 (0.3582) grad_norm 484591.1875 (inf) mem 14543MB +[2023-10-12 15:11:46 simmim_pretrain](main_simmim.py 218): INFO Train: [126/200][5000/6787] eta 0:07:39 lr 0.000200 time 0.2609 (0.2574) loss 0.3429 (0.3582) grad_norm 250718.5625 (inf) mem 14543MB +[2023-10-12 15:13:56 simmim_pretrain](main_simmim.py 218): INFO Train: [126/200][5500/6787] eta 0:05:31 lr 0.000200 time 0.2609 (0.2576) loss 0.4048 (0.3583) grad_norm 147934.5312 (inf) mem 14543MB +[2023-10-12 15:16:06 simmim_pretrain](main_simmim.py 218): INFO Train: [126/200][6000/6787] eta 0:03:22 lr 0.000200 time 0.2609 (0.2578) loss 0.3695 (0.3583) grad_norm 273047.6250 (inf) mem 14543MB +[2023-10-12 15:18:16 simmim_pretrain](main_simmim.py 218): INFO Train: [126/200][6500/6787] eta 0:01:14 lr 0.000200 time 0.2611 (0.2580) loss 0.3589 (0.3585) grad_norm 276722.5625 (inf) mem 14543MB +[2023-10-12 15:19:31 simmim_pretrain](main_simmim.py 228): INFO EPOCH 126 training takes 0:29:12 +[2023-10-12 15:19:32 simmim_pretrain](main_simmim.py 218): INFO Train: [127/200][0/6787] eta 2:42:51 lr 0.000200 time 1.4397 (1.4397) loss 0.3766 (0.3766) grad_norm 373395.7812 
(373395.7812) mem 14543MB +[2023-10-12 15:21:38 simmim_pretrain](main_simmim.py 218): INFO Train: [127/200][500/6787] eta 0:26:36 lr 0.000200 time 0.2450 (0.2540) loss 0.3488 (0.3574) grad_norm 219675.9375 (344170.9688) mem 14543MB +[2023-10-12 15:23:44 simmim_pretrain](main_simmim.py 218): INFO Train: [127/200][1000/6787] eta 0:24:24 lr 0.000200 time 0.2537 (0.2531) loss 0.3344 (0.3581) grad_norm 192416.8281 (404688.0938) mem 14543MB +[2023-10-12 15:25:50 simmim_pretrain](main_simmim.py 218): INFO Train: [127/200][1500/6787] eta 0:22:16 lr 0.000200 time 0.2504 (0.2528) loss 0.3710 (0.3583) grad_norm 512367.7500 (394950.2500) mem 14543MB +[2023-10-12 15:27:56 simmim_pretrain](main_simmim.py 218): INFO Train: [127/200][2000/6787] eta 0:20:08 lr 0.000200 time 0.2451 (0.2525) loss 0.3821 (0.3580) grad_norm 757513.3125 (inf) mem 14543MB +[2023-10-12 15:30:02 simmim_pretrain](main_simmim.py 218): INFO Train: [127/200][2500/6787] eta 0:18:01 lr 0.000200 time 0.2518 (0.2523) loss 0.3453 (0.3579) grad_norm 254295.4062 (inf) mem 14543MB +[2023-10-12 15:32:07 simmim_pretrain](main_simmim.py 218): INFO Train: [127/200][3000/6787] eta 0:15:54 lr 0.000200 time 0.2530 (0.2521) loss 0.3657 (0.3578) grad_norm 592169.8125 (inf) mem 14543MB +[2023-10-12 15:34:13 simmim_pretrain](main_simmim.py 218): INFO Train: [127/200][3500/6787] eta 0:13:48 lr 0.000200 time 0.2486 (0.2520) loss 0.3401 (0.3579) grad_norm 303716.9062 (inf) mem 14543MB +[2023-10-12 15:36:19 simmim_pretrain](main_simmim.py 218): INFO Train: [127/200][4000/6787] eta 0:11:42 lr 0.000200 time 0.2478 (0.2519) loss 0.3599 (0.3583) grad_norm 257904.7188 (inf) mem 14543MB +[2023-10-12 15:38:24 simmim_pretrain](main_simmim.py 218): INFO Train: [127/200][4500/6787] eta 0:09:36 lr 0.000200 time 0.2487 (0.2519) loss 0.3648 (0.3587) grad_norm 163928.5469 (inf) mem 14543MB +[2023-10-12 15:40:30 simmim_pretrain](main_simmim.py 218): INFO Train: [127/200][5000/6787] eta 0:07:29 lr 0.000200 time 0.2456 (0.2517) loss 0.3663 (0.3590) grad_norm 155262.6250 (inf) mem 14543MB +[2023-10-12 15:42:35 simmim_pretrain](main_simmim.py 218): INFO Train: [127/200][5500/6787] eta 0:05:23 lr 0.000200 time 0.2517 (0.2516) loss 0.3553 (0.3592) grad_norm 81886.1719 (inf) mem 14543MB +[2023-10-12 15:44:40 simmim_pretrain](main_simmim.py 218): INFO Train: [127/200][6000/6787] eta 0:03:17 lr 0.000200 time 0.2506 (0.2516) loss 0.3552 (0.3593) grad_norm 118899.3672 (inf) mem 14543MB +[2023-10-12 15:46:46 simmim_pretrain](main_simmim.py 218): INFO Train: [127/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2531 (0.2515) loss 0.3488 (0.3593) grad_norm 201421.7500 (inf) mem 14543MB +[2023-10-12 15:47:59 simmim_pretrain](main_simmim.py 228): INFO EPOCH 127 training takes 0:28:28 +[2023-10-12 15:48:00 simmim_pretrain](main_simmim.py 218): INFO Train: [128/200][0/6787] eta 2:51:46 lr 0.000200 time 1.5186 (1.5186) loss 0.3862 (0.3862) grad_norm 217034.2500 (217034.2500) mem 14543MB +[2023-10-12 15:50:06 simmim_pretrain](main_simmim.py 218): INFO Train: [128/200][500/6787] eta 0:26:32 lr 0.000200 time 0.2527 (0.2533) loss 0.3431 (0.3603) grad_norm 282695.5938 (208315.7656) mem 14543MB +[2023-10-12 15:52:11 simmim_pretrain](main_simmim.py 218): INFO Train: [128/200][1000/6787] eta 0:24:16 lr 0.000200 time 0.2511 (0.2518) loss 0.3599 (0.3599) grad_norm 337960.5625 (212261.0469) mem 14543MB +[2023-10-12 15:54:16 simmim_pretrain](main_simmim.py 218): INFO Train: [128/200][1500/6787] eta 0:22:08 lr 0.000200 time 0.2469 (0.2513) loss 0.3473 (0.3593) grad_norm 215515.5625 (236224.3906) mem 
14543MB +[2023-10-12 15:56:21 simmim_pretrain](main_simmim.py 218): INFO Train: [128/200][2000/6787] eta 0:20:01 lr 0.000200 time 0.2492 (0.2511) loss 0.3423 (0.3587) grad_norm 511594.0938 (278825.6562) mem 14543MB +[2023-10-12 15:58:26 simmim_pretrain](main_simmim.py 218): INFO Train: [128/200][2500/6787] eta 0:17:55 lr 0.000200 time 0.2483 (0.2509) loss 0.3569 (0.3582) grad_norm 458018.4375 (315098.5312) mem 14543MB +[2023-10-12 16:00:32 simmim_pretrain](main_simmim.py 218): INFO Train: [128/200][3000/6787] eta 0:15:50 lr 0.000200 time 0.2550 (0.2509) loss 0.3695 (0.3584) grad_norm 455031.3750 (331489.3750) mem 14543MB +[2023-10-12 16:02:37 simmim_pretrain](main_simmim.py 218): INFO Train: [128/200][3500/6787] eta 0:13:44 lr 0.000200 time 0.2518 (0.2509) loss 0.3697 (0.3585) grad_norm 248475.2500 (inf) mem 14543MB +[2023-10-12 16:04:43 simmim_pretrain](main_simmim.py 218): INFO Train: [128/200][4000/6787] eta 0:11:39 lr 0.000200 time 0.2464 (0.2509) loss 0.3881 (0.3586) grad_norm 173020.7031 (inf) mem 14543MB +[2023-10-12 16:06:48 simmim_pretrain](main_simmim.py 218): INFO Train: [128/200][4500/6787] eta 0:09:33 lr 0.000200 time 0.2457 (0.2509) loss 0.3588 (0.3586) grad_norm 155351.0312 (inf) mem 14543MB +[2023-10-12 16:08:53 simmim_pretrain](main_simmim.py 218): INFO Train: [128/200][5000/6787] eta 0:07:28 lr 0.000200 time 0.2514 (0.2509) loss 0.3717 (0.3586) grad_norm 252390.2812 (inf) mem 14543MB +[2023-10-12 16:10:59 simmim_pretrain](main_simmim.py 218): INFO Train: [128/200][5500/6787] eta 0:05:22 lr 0.000200 time 0.2518 (0.2509) loss 0.3841 (0.3586) grad_norm 223258.7344 (inf) mem 14543MB +[2023-10-12 16:13:04 simmim_pretrain](main_simmim.py 218): INFO Train: [128/200][6000/6787] eta 0:03:17 lr 0.000200 time 0.2522 (0.2508) loss 0.3480 (0.3586) grad_norm 371494.4062 (inf) mem 14543MB +[2023-10-12 16:15:10 simmim_pretrain](main_simmim.py 218): INFO Train: [128/200][6500/6787] eta 0:01:11 lr 0.000200 time 0.2464 (0.2508) loss 0.3520 (0.3586) grad_norm 354770.1562 (inf) mem 14543MB +[2023-10-12 16:16:22 simmim_pretrain](main_simmim.py 228): INFO EPOCH 128 training takes 0:28:23 +[2023-10-12 16:16:23 simmim_pretrain](main_simmim.py 218): INFO Train: [129/200][0/6787] eta 2:44:01 lr 0.000200 time 1.4500 (1.4500) loss 0.3577 (0.3577) grad_norm 259416.8906 (259416.8906) mem 14543MB +[2023-10-12 16:18:28 simmim_pretrain](main_simmim.py 218): INFO Train: [129/200][500/6787] eta 0:26:26 lr 0.000200 time 0.2465 (0.2523) loss 0.3518 (0.3601) grad_norm 321515.8438 (inf) mem 14543MB +[2023-10-12 16:20:34 simmim_pretrain](main_simmim.py 218): INFO Train: [129/200][1000/6787] eta 0:24:16 lr 0.000200 time 0.2523 (0.2516) loss 0.3727 (0.3594) grad_norm 193473.9531 (inf) mem 14543MB +[2023-10-12 16:22:39 simmim_pretrain](main_simmim.py 218): INFO Train: [129/200][1500/6787] eta 0:22:08 lr 0.000200 time 0.2533 (0.2513) loss 0.3690 (0.3595) grad_norm 194347.1406 (inf) mem 14543MB +[2023-10-12 16:24:45 simmim_pretrain](main_simmim.py 218): INFO Train: [129/200][2000/6787] eta 0:20:02 lr 0.000200 time 0.2460 (0.2512) loss 0.3686 (0.3600) grad_norm 123327.3047 (inf) mem 14543MB +[2023-10-12 16:26:50 simmim_pretrain](main_simmim.py 218): INFO Train: [129/200][2500/6787] eta 0:17:56 lr 0.000200 time 0.2565 (0.2511) loss 0.3478 (0.3602) grad_norm 156901.4844 (inf) mem 14543MB +[2023-10-12 16:28:55 simmim_pretrain](main_simmim.py 218): INFO Train: [129/200][3000/6787] eta 0:15:50 lr 0.000200 time 0.2499 (0.2511) loss 0.3757 (0.3605) grad_norm 135104.0938 (inf) mem 14543MB +[2023-10-12 16:31:01 
simmim_pretrain](main_simmim.py 218): INFO Train: [129/200][3500/6787] eta 0:13:45 lr 0.000200 time 0.2504 (0.2510) loss 0.3595 (0.3606) grad_norm 85180.9297 (inf) mem 14543MB +[2023-10-12 16:33:06 simmim_pretrain](main_simmim.py 218): INFO Train: [129/200][4000/6787] eta 0:11:39 lr 0.000200 time 0.2515 (0.2511) loss 0.3595 (0.3605) grad_norm 101380.2188 (inf) mem 14543MB +[2023-10-12 16:35:12 simmim_pretrain](main_simmim.py 218): INFO Train: [129/200][4500/6787] eta 0:09:34 lr 0.000200 time 0.2495 (0.2510) loss 0.3749 (0.3605) grad_norm 129272.8047 (inf) mem 14543MB +[2023-10-12 16:37:17 simmim_pretrain](main_simmim.py 218): INFO Train: [129/200][5000/6787] eta 0:07:28 lr 0.000200 time 0.2528 (0.2510) loss 0.3439 (0.3603) grad_norm 196888.4375 (inf) mem 14543MB +[2023-10-12 16:39:23 simmim_pretrain](main_simmim.py 218): INFO Train: [129/200][5500/6787] eta 0:05:23 lr 0.000200 time 0.2515 (0.2511) loss 0.3673 (0.3601) grad_norm 250946.1094 (inf) mem 14543MB +[2023-10-12 16:41:29 simmim_pretrain](main_simmim.py 218): INFO Train: [129/200][6000/6787] eta 0:03:17 lr 0.000200 time 0.2530 (0.2511) loss 0.3446 (0.3599) grad_norm 252532.6875 (inf) mem 14543MB +[2023-10-12 16:43:36 simmim_pretrain](main_simmim.py 218): INFO Train: [129/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2533 (0.2513) loss 0.3712 (0.3597) grad_norm 426896.1875 (inf) mem 14543MB +[2023-10-12 16:44:50 simmim_pretrain](main_simmim.py 228): INFO EPOCH 129 training takes 0:28:27 +[2023-10-12 16:44:51 simmim_pretrain](main_simmim.py 218): INFO Train: [130/200][0/6787] eta 2:54:38 lr 0.000200 time 1.5439 (1.5439) loss 0.3519 (0.3519) grad_norm 471710.4375 (471710.4375) mem 14543MB +[2023-10-12 16:46:58 simmim_pretrain](main_simmim.py 218): INFO Train: [130/200][500/6787] eta 0:26:55 lr 0.000200 time 0.2552 (0.2569) loss 0.3599 (0.3592) grad_norm 276504.0000 (371220.8125) mem 14543MB +[2023-10-12 16:49:06 simmim_pretrain](main_simmim.py 218): INFO Train: [130/200][1000/6787] eta 0:24:40 lr 0.000200 time 0.2507 (0.2558) loss 0.3549 (0.3580) grad_norm 342059.5312 (inf) mem 14543MB +[2023-10-12 16:51:13 simmim_pretrain](main_simmim.py 218): INFO Train: [130/200][1500/6787] eta 0:22:30 lr 0.000200 time 0.2587 (0.2554) loss 0.3576 (0.3577) grad_norm 235381.9531 (inf) mem 14543MB +[2023-10-12 16:53:20 simmim_pretrain](main_simmim.py 218): INFO Train: [130/200][2000/6787] eta 0:20:20 lr 0.000200 time 0.2542 (0.2551) loss 0.3481 (0.3577) grad_norm 617979.8125 (inf) mem 14543MB +[2023-10-12 16:55:27 simmim_pretrain](main_simmim.py 218): INFO Train: [130/200][2500/6787] eta 0:18:12 lr 0.000200 time 0.2550 (0.2548) loss 0.3503 (0.3577) grad_norm 393041.7812 (inf) mem 14543MB +[2023-10-12 16:57:34 simmim_pretrain](main_simmim.py 218): INFO Train: [130/200][3000/6787] eta 0:16:04 lr 0.000200 time 0.2533 (0.2546) loss 0.3402 (0.3577) grad_norm 603406.9375 (inf) mem 14543MB +[2023-10-12 16:59:40 simmim_pretrain](main_simmim.py 218): INFO Train: [130/200][3500/6787] eta 0:13:56 lr 0.000200 time 0.2511 (0.2544) loss 0.3503 (0.3578) grad_norm 366752.0938 (inf) mem 14543MB +[2023-10-12 17:01:47 simmim_pretrain](main_simmim.py 218): INFO Train: [130/200][4000/6787] eta 0:11:48 lr 0.000200 time 0.2523 (0.2543) loss 0.3662 (0.3578) grad_norm 262770.4375 (inf) mem 14543MB +[2023-10-12 17:03:53 simmim_pretrain](main_simmim.py 218): INFO Train: [130/200][4500/6787] eta 0:09:41 lr 0.000200 time 0.2535 (0.2541) loss 0.3482 (0.3580) grad_norm 242703.5000 (inf) mem 14543MB +[2023-10-12 17:06:00 simmim_pretrain](main_simmim.py 218): INFO Train: 
[130/200][5000/6787] eta 0:07:33 lr 0.000200 time 0.2532 (0.2540) loss 0.3486 (0.3582) grad_norm 268152.5312 (inf) mem 14543MB +[2023-10-12 17:08:06 simmim_pretrain](main_simmim.py 218): INFO Train: [130/200][5500/6787] eta 0:05:26 lr 0.000200 time 0.2540 (0.2539) loss 0.3716 (0.3583) grad_norm 327990.4062 (inf) mem 14543MB +[2023-10-12 17:10:12 simmim_pretrain](main_simmim.py 218): INFO Train: [130/200][6000/6787] eta 0:03:19 lr 0.000200 time 0.2523 (0.2538) loss 0.3543 (0.3583) grad_norm 245587.0312 (inf) mem 14543MB +[2023-10-12 17:12:19 simmim_pretrain](main_simmim.py 218): INFO Train: [130/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2504 (0.2537) loss 0.3670 (0.3582) grad_norm 291439.6562 (inf) mem 14543MB +[2023-10-12 17:13:32 simmim_pretrain](main_simmim.py 228): INFO EPOCH 130 training takes 0:28:42 +[2023-10-12 17:13:33 simmim_pretrain](main_simmim.py 218): INFO Train: [131/200][0/6787] eta 2:31:28 lr 0.000200 time 1.3391 (1.3391) loss 0.3429 (0.3429) grad_norm 155737.2969 (155737.2969) mem 14543MB +[2023-10-12 17:15:38 simmim_pretrain](main_simmim.py 218): INFO Train: [131/200][500/6787] eta 0:26:30 lr 0.000200 time 0.2496 (0.2529) loss 0.3679 (0.3612) grad_norm 93049.5078 (148288.1094) mem 14543MB +[2023-10-12 17:17:44 simmim_pretrain](main_simmim.py 218): INFO Train: [131/200][1000/6787] eta 0:24:17 lr 0.000200 time 0.2483 (0.2518) loss 0.3485 (0.3614) grad_norm 129578.6172 (141761.3281) mem 14543MB +[2023-10-12 17:19:49 simmim_pretrain](main_simmim.py 218): INFO Train: [131/200][1500/6787] eta 0:22:09 lr 0.000200 time 0.2515 (0.2514) loss 0.3749 (0.3612) grad_norm 136663.1406 (138612.8281) mem 14543MB +[2023-10-12 17:21:54 simmim_pretrain](main_simmim.py 218): INFO Train: [131/200][2000/6787] eta 0:20:02 lr 0.000200 time 0.2501 (0.2513) loss 0.3553 (0.3611) grad_norm 133734.3750 (142076.5781) mem 14543MB +[2023-10-12 17:24:00 simmim_pretrain](main_simmim.py 218): INFO Train: [131/200][2500/6787] eta 0:17:56 lr 0.000200 time 0.2496 (0.2512) loss 0.3550 (0.3609) grad_norm 408582.1875 (153667.4531) mem 14543MB +[2023-10-12 17:26:05 simmim_pretrain](main_simmim.py 218): INFO Train: [131/200][3000/6787] eta 0:15:51 lr 0.000200 time 0.2512 (0.2511) loss 0.3580 (0.3606) grad_norm 282452.0938 (162964.1406) mem 14543MB +[2023-10-12 17:28:11 simmim_pretrain](main_simmim.py 218): INFO Train: [131/200][3500/6787] eta 0:13:45 lr 0.000200 time 0.2468 (0.2511) loss 0.3567 (0.3603) grad_norm 223185.2500 (171436.6875) mem 14543MB +[2023-10-12 17:30:16 simmim_pretrain](main_simmim.py 218): INFO Train: [131/200][4000/6787] eta 0:11:39 lr 0.000200 time 0.2469 (0.2511) loss 0.3557 (0.3600) grad_norm 321513.8438 (181662.6406) mem 14543MB +[2023-10-12 17:32:22 simmim_pretrain](main_simmim.py 218): INFO Train: [131/200][4500/6787] eta 0:09:34 lr 0.000200 time 0.2509 (0.2511) loss 0.3737 (0.3598) grad_norm 221285.6406 (inf) mem 14543MB +[2023-10-12 17:34:28 simmim_pretrain](main_simmim.py 218): INFO Train: [131/200][5000/6787] eta 0:07:28 lr 0.000200 time 0.2491 (0.2512) loss 0.3595 (0.3596) grad_norm 258731.4062 (inf) mem 14543MB +[2023-10-12 17:36:33 simmim_pretrain](main_simmim.py 218): INFO Train: [131/200][5500/6787] eta 0:05:23 lr 0.000200 time 0.2491 (0.2512) loss 0.3390 (0.3594) grad_norm 138180.3594 (inf) mem 14543MB +[2023-10-12 17:38:39 simmim_pretrain](main_simmim.py 218): INFO Train: [131/200][6000/6787] eta 0:03:17 lr 0.000200 time 0.2550 (0.2511) loss 0.3448 (0.3594) grad_norm 338153.8750 (inf) mem 14543MB +[2023-10-12 17:40:44 simmim_pretrain](main_simmim.py 218): INFO Train: 
[131/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2542 (0.2511) loss 0.3889 (0.3594) grad_norm 120454.3828 (inf) mem 14543MB +[2023-10-12 17:41:56 simmim_pretrain](main_simmim.py 228): INFO EPOCH 131 training takes 0:28:24 +[2023-10-12 17:41:58 simmim_pretrain](main_simmim.py 218): INFO Train: [132/200][0/6787] eta 2:46:38 lr 0.000200 time 1.4732 (1.4732) loss 0.3678 (0.3678) grad_norm 200131.4062 (200131.4062) mem 14543MB +[2023-10-12 17:44:03 simmim_pretrain](main_simmim.py 218): INFO Train: [132/200][500/6787] eta 0:26:29 lr 0.000200 time 0.2553 (0.2528) loss 0.3516 (0.3598) grad_norm 206942.5312 (235991.3906) mem 14543MB +[2023-10-12 17:46:09 simmim_pretrain](main_simmim.py 218): INFO Train: [132/200][1000/6787] eta 0:24:17 lr 0.000200 time 0.2513 (0.2519) loss 0.3686 (0.3591) grad_norm 165154.3750 (237167.0000) mem 14543MB +[2023-10-12 17:48:14 simmim_pretrain](main_simmim.py 218): INFO Train: [132/200][1500/6787] eta 0:22:09 lr 0.000200 time 0.2540 (0.2515) loss 0.3346 (0.3600) grad_norm 162795.5781 (inf) mem 14543MB +[2023-10-12 17:50:19 simmim_pretrain](main_simmim.py 218): INFO Train: [132/200][2000/6787] eta 0:20:02 lr 0.000200 time 0.2529 (0.2512) loss 0.3642 (0.3606) grad_norm 124697.1172 (inf) mem 14543MB +[2023-10-12 17:52:24 simmim_pretrain](main_simmim.py 218): INFO Train: [132/200][2500/6787] eta 0:17:56 lr 0.000200 time 0.2501 (0.2510) loss 0.3689 (0.3612) grad_norm 95991.1250 (inf) mem 14543MB +[2023-10-12 17:54:30 simmim_pretrain](main_simmim.py 218): INFO Train: [132/200][3000/6787] eta 0:15:50 lr 0.000200 time 0.2466 (0.2510) loss 0.3848 (0.3619) grad_norm 61610.3945 (inf) mem 14543MB +[2023-10-12 17:56:35 simmim_pretrain](main_simmim.py 218): INFO Train: [132/200][3500/6787] eta 0:13:44 lr 0.000200 time 0.2473 (0.2509) loss 0.3727 (0.3623) grad_norm 62217.8867 (inf) mem 14543MB +[2023-10-12 17:58:40 simmim_pretrain](main_simmim.py 218): INFO Train: [132/200][4000/6787] eta 0:11:39 lr 0.000200 time 0.2538 (0.2509) loss 0.3864 (0.3624) grad_norm 72399.7812 (inf) mem 14543MB +[2023-10-12 18:00:45 simmim_pretrain](main_simmim.py 218): INFO Train: [132/200][4500/6787] eta 0:09:33 lr 0.000200 time 0.2534 (0.2508) loss 0.3385 (0.3625) grad_norm 87369.2734 (inf) mem 14543MB +[2023-10-12 18:02:51 simmim_pretrain](main_simmim.py 218): INFO Train: [132/200][5000/6787] eta 0:07:28 lr 0.000200 time 0.2466 (0.2508) loss 0.3484 (0.3623) grad_norm 137244.1250 (inf) mem 14543MB +[2023-10-12 18:04:56 simmim_pretrain](main_simmim.py 218): INFO Train: [132/200][5500/6787] eta 0:05:22 lr 0.000200 time 0.2459 (0.2508) loss 0.3530 (0.3621) grad_norm 165874.4688 (inf) mem 14543MB +[2023-10-12 18:07:01 simmim_pretrain](main_simmim.py 218): INFO Train: [132/200][6000/6787] eta 0:03:17 lr 0.000200 time 0.2501 (0.2508) loss 0.3632 (0.3619) grad_norm 136591.6094 (inf) mem 14543MB +[2023-10-12 18:09:06 simmim_pretrain](main_simmim.py 218): INFO Train: [132/200][6500/6787] eta 0:01:11 lr 0.000200 time 0.2504 (0.2507) loss 0.3445 (0.3618) grad_norm 135790.5781 (inf) mem 14543MB +[2023-10-12 18:10:19 simmim_pretrain](main_simmim.py 228): INFO EPOCH 132 training takes 0:28:22 +[2023-10-12 18:10:21 simmim_pretrain](main_simmim.py 218): INFO Train: [133/200][0/6787] eta 3:29:28 lr 0.000200 time 1.8519 (1.8519) loss 0.3727 (0.3727) grad_norm 97838.2031 (97838.2031) mem 14543MB +[2023-10-12 18:12:26 simmim_pretrain](main_simmim.py 218): INFO Train: [133/200][500/6787] eta 0:26:31 lr 0.000200 time 0.2515 (0.2531) loss 0.3704 (0.3590) grad_norm 193506.2188 (157063.5312) mem 14543MB +[2023-10-12 18:14:31 
simmim_pretrain](main_simmim.py 218): INFO Train: [133/200][1000/6787] eta 0:24:16 lr 0.000200 time 0.2538 (0.2517) loss 0.3380 (0.3586) grad_norm 227608.5625 (170667.0781) mem 14543MB +[2023-10-12 18:16:36 simmim_pretrain](main_simmim.py 218): INFO Train: [133/200][1500/6787] eta 0:22:08 lr 0.000200 time 0.2473 (0.2514) loss 0.3699 (0.3582) grad_norm 175783.0469 (182904.2656) mem 14543MB +[2023-10-12 18:18:41 simmim_pretrain](main_simmim.py 218): INFO Train: [133/200][2000/6787] eta 0:20:01 lr 0.000200 time 0.2491 (0.2511) loss 0.3464 (0.3578) grad_norm 581568.2500 (204627.3906) mem 14543MB +[2023-10-12 18:20:47 simmim_pretrain](main_simmim.py 218): INFO Train: [133/200][2500/6787] eta 0:17:55 lr 0.000200 time 0.2501 (0.2510) loss 0.3436 (0.3577) grad_norm 329301.3438 (223439.9375) mem 14543MB +[2023-10-12 18:22:52 simmim_pretrain](main_simmim.py 218): INFO Train: [133/200][3000/6787] eta 0:15:50 lr 0.000200 time 0.2526 (0.2509) loss 0.3609 (0.3574) grad_norm 442782.5312 (252066.7656) mem 14543MB +[2023-10-12 18:24:57 simmim_pretrain](main_simmim.py 218): INFO Train: [133/200][3500/6787] eta 0:13:44 lr 0.000200 time 0.2496 (0.2508) loss 0.3751 (0.3572) grad_norm 326120.0625 (inf) mem 14543MB +[2023-10-12 18:27:02 simmim_pretrain](main_simmim.py 218): INFO Train: [133/200][4000/6787] eta 0:11:38 lr 0.000200 time 0.2509 (0.2508) loss 0.3789 (0.3573) grad_norm 178101.1719 (inf) mem 14543MB +[2023-10-12 18:29:08 simmim_pretrain](main_simmim.py 218): INFO Train: [133/200][4500/6787] eta 0:09:33 lr 0.000200 time 0.2502 (0.2507) loss 0.3726 (0.3576) grad_norm 193723.7344 (inf) mem 14543MB +[2023-10-12 18:31:13 simmim_pretrain](main_simmim.py 218): INFO Train: [133/200][5000/6787] eta 0:07:27 lr 0.000200 time 0.2459 (0.2507) loss 0.3557 (0.3580) grad_norm 140253.6875 (inf) mem 14543MB +[2023-10-12 18:33:18 simmim_pretrain](main_simmim.py 218): INFO Train: [133/200][5500/6787] eta 0:05:22 lr 0.000200 time 0.2536 (0.2506) loss 0.3665 (0.3583) grad_norm 127696.6719 (inf) mem 14543MB +[2023-10-12 18:35:23 simmim_pretrain](main_simmim.py 218): INFO Train: [133/200][6000/6787] eta 0:03:17 lr 0.000200 time 0.2490 (0.2507) loss 0.3338 (0.3585) grad_norm 123458.5078 (inf) mem 14543MB +[2023-10-12 18:37:29 simmim_pretrain](main_simmim.py 218): INFO Train: [133/200][6500/6787] eta 0:01:11 lr 0.000200 time 0.2465 (0.2507) loss 0.3659 (0.3586) grad_norm 133800.5625 (inf) mem 14543MB +[2023-10-12 18:38:41 simmim_pretrain](main_simmim.py 228): INFO EPOCH 133 training takes 0:28:22 +[2023-10-12 18:38:43 simmim_pretrain](main_simmim.py 218): INFO Train: [134/200][0/6787] eta 2:45:43 lr 0.000200 time 1.4651 (1.4651) loss 0.3602 (0.3602) grad_norm 185661.7031 (185661.7031) mem 14543MB +[2023-10-12 18:40:48 simmim_pretrain](main_simmim.py 218): INFO Train: [134/200][500/6787] eta 0:26:31 lr 0.000200 time 0.2529 (0.2531) loss 0.3573 (0.3572) grad_norm 159416.6875 (171021.7812) mem 14543MB +[2023-10-12 18:42:53 simmim_pretrain](main_simmim.py 218): INFO Train: [134/200][1000/6787] eta 0:24:18 lr 0.000200 time 0.2546 (0.2520) loss 0.3599 (0.3586) grad_norm 202375.3906 (193314.6250) mem 14543MB +[2023-10-12 18:44:59 simmim_pretrain](main_simmim.py 218): INFO Train: [134/200][1500/6787] eta 0:22:10 lr 0.000200 time 0.2523 (0.2517) loss 0.3494 (0.3586) grad_norm 161611.0781 (201038.0312) mem 14543MB +[2023-10-12 18:47:05 simmim_pretrain](main_simmim.py 218): INFO Train: [134/200][2000/6787] eta 0:20:05 lr 0.000200 time 0.2452 (0.2519) loss 0.3617 (0.3584) grad_norm 285122.7812 (221396.8750) mem 14543MB +[2023-10-12 
18:49:12 simmim_pretrain](main_simmim.py 218): INFO Train: [134/200][2500/6787] eta 0:18:00 lr 0.000200 time 0.2537 (0.2521) loss 0.3521 (0.3583) grad_norm 375998.7500 (236736.6094) mem 14543MB +[2023-10-12 18:51:18 simmim_pretrain](main_simmim.py 218): INFO Train: [134/200][3000/6787] eta 0:15:55 lr 0.000200 time 0.2536 (0.2523) loss 0.3517 (0.3580) grad_norm 201996.0156 (inf) mem 14543MB +[2023-10-12 18:53:26 simmim_pretrain](main_simmim.py 218): INFO Train: [134/200][3500/6787] eta 0:13:50 lr 0.000200 time 0.2529 (0.2526) loss 0.3649 (0.3581) grad_norm 272885.8125 (inf) mem 14543MB +[2023-10-12 18:55:32 simmim_pretrain](main_simmim.py 218): INFO Train: [134/200][4000/6787] eta 0:11:44 lr 0.000200 time 0.2530 (0.2527) loss 0.3591 (0.3582) grad_norm 244011.6094 (inf) mem 14543MB +[2023-10-12 18:57:39 simmim_pretrain](main_simmim.py 218): INFO Train: [134/200][4500/6787] eta 0:09:38 lr 0.000200 time 0.2570 (0.2528) loss 0.3658 (0.3583) grad_norm 191045.4375 (inf) mem 14543MB +[2023-10-12 18:59:46 simmim_pretrain](main_simmim.py 218): INFO Train: [134/200][5000/6787] eta 0:07:31 lr 0.000200 time 0.2474 (0.2528) loss 0.3627 (0.3583) grad_norm 210572.5938 (inf) mem 14543MB +[2023-10-12 19:01:52 simmim_pretrain](main_simmim.py 218): INFO Train: [134/200][5500/6787] eta 0:05:25 lr 0.000200 time 0.2566 (0.2529) loss 0.3706 (0.3582) grad_norm 584391.1250 (inf) mem 14543MB +[2023-10-12 19:04:00 simmim_pretrain](main_simmim.py 218): INFO Train: [134/200][6000/6787] eta 0:03:19 lr 0.000200 time 0.2538 (0.2531) loss 0.3448 (0.3581) grad_norm 215602.3281 (inf) mem 14543MB +[2023-10-12 19:06:08 simmim_pretrain](main_simmim.py 218): INFO Train: [134/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2540 (0.2533) loss 0.3470 (0.3580) grad_norm 324175.9375 (inf) mem 14543MB +[2023-10-12 19:07:21 simmim_pretrain](main_simmim.py 228): INFO EPOCH 134 training takes 0:28:40 +[2023-10-12 19:07:23 simmim_pretrain](main_simmim.py 218): INFO Train: [135/200][0/6787] eta 2:47:38 lr 0.000200 time 1.4820 (1.4820) loss 0.3740 (0.3740) grad_norm 411637.9375 (411637.9375) mem 14543MB +[2023-10-12 19:09:29 simmim_pretrain](main_simmim.py 218): INFO Train: [135/200][500/6787] eta 0:26:42 lr 0.000200 time 0.2594 (0.2550) loss 0.3374 (0.3573) grad_norm 286568.0000 (inf) mem 14543MB +[2023-10-12 19:11:35 simmim_pretrain](main_simmim.py 218): INFO Train: [135/200][1000/6787] eta 0:24:28 lr 0.000200 time 0.2591 (0.2538) loss 0.3576 (0.3568) grad_norm 364674.8750 (inf) mem 14543MB +[2023-10-12 19:13:42 simmim_pretrain](main_simmim.py 218): INFO Train: [135/200][1500/6787] eta 0:22:19 lr 0.000200 time 0.2546 (0.2534) loss 0.3367 (0.3570) grad_norm 402828.5938 (inf) mem 14543MB +[2023-10-12 19:15:48 simmim_pretrain](main_simmim.py 218): INFO Train: [135/200][2000/6787] eta 0:20:12 lr 0.000200 time 0.2540 (0.2533) loss 0.3637 (0.3569) grad_norm 463561.6562 (inf) mem 14543MB +[2023-10-12 19:17:55 simmim_pretrain](main_simmim.py 218): INFO Train: [135/200][2500/6787] eta 0:18:05 lr 0.000200 time 0.2556 (0.2533) loss 0.3554 (0.3567) grad_norm 254329.4688 (inf) mem 14543MB +[2023-10-12 19:20:01 simmim_pretrain](main_simmim.py 218): INFO Train: [135/200][3000/6787] eta 0:15:59 lr 0.000200 time 0.2468 (0.2533) loss 0.3444 (0.3566) grad_norm 396845.4062 (inf) mem 14543MB +[2023-10-12 19:22:09 simmim_pretrain](main_simmim.py 218): INFO Train: [135/200][3500/6787] eta 0:13:53 lr 0.000200 time 0.2538 (0.2534) loss 0.3718 (0.3567) grad_norm 240162.9688 (inf) mem 14543MB +[2023-10-12 19:24:18 simmim_pretrain](main_simmim.py 218): INFO Train: 
[135/200][4000/6787] eta 0:11:47 lr 0.000200 time 0.2541 (0.2540) loss 0.3508 (0.3568) grad_norm 281023.1250 (inf) mem 14543MB +[2023-10-12 19:26:26 simmim_pretrain](main_simmim.py 218): INFO Train: [135/200][4500/6787] eta 0:09:41 lr 0.000200 time 0.2559 (0.2544) loss 0.3683 (0.3569) grad_norm 421727.2188 (inf) mem 14543MB +[2023-10-12 19:28:35 simmim_pretrain](main_simmim.py 218): INFO Train: [135/200][5000/6787] eta 0:07:35 lr 0.000200 time 0.2594 (0.2547) loss 0.3510 (0.3569) grad_norm 552607.7500 (inf) mem 14543MB +[2023-10-12 19:30:43 simmim_pretrain](main_simmim.py 218): INFO Train: [135/200][5500/6787] eta 0:05:27 lr 0.000200 time 0.2578 (0.2548) loss 0.3710 (0.3569) grad_norm 757826.7500 (inf) mem 14543MB +[2023-10-12 19:32:51 simmim_pretrain](main_simmim.py 218): INFO Train: [135/200][6000/6787] eta 0:03:20 lr 0.000200 time 0.2540 (0.2549) loss 0.3531 (0.3569) grad_norm 455450.0938 (inf) mem 14543MB +[2023-10-12 19:34:59 simmim_pretrain](main_simmim.py 218): INFO Train: [135/200][6500/6787] eta 0:01:13 lr 0.000200 time 0.2551 (0.2550) loss 0.3612 (0.3570) grad_norm 351890.0625 (inf) mem 14543MB +[2023-10-12 19:36:13 simmim_pretrain](main_simmim.py 228): INFO EPOCH 135 training takes 0:28:51 +[2023-10-12 19:36:14 simmim_pretrain](main_simmim.py 218): INFO Train: [136/200][0/6787] eta 2:48:19 lr 0.000200 time 1.4881 (1.4881) loss 0.3467 (0.3467) grad_norm 247090.6250 (247090.6250) mem 14543MB +[2023-10-12 19:38:21 simmim_pretrain](main_simmim.py 218): INFO Train: [136/200][500/6787] eta 0:26:46 lr 0.000200 time 0.2521 (0.2556) loss 0.3630 (0.3592) grad_norm 289176.0312 (246966.4375) mem 14543MB +[2023-10-12 19:40:28 simmim_pretrain](main_simmim.py 218): INFO Train: [136/200][1000/6787] eta 0:24:34 lr 0.000200 time 0.2555 (0.2547) loss 0.3498 (0.3589) grad_norm 276987.5000 (248842.0156) mem 14543MB +[2023-10-12 19:42:35 simmim_pretrain](main_simmim.py 218): INFO Train: [136/200][1500/6787] eta 0:22:25 lr 0.000200 time 0.2494 (0.2544) loss 0.3560 (0.3588) grad_norm 259091.4844 (246634.2969) mem 14543MB +[2023-10-12 19:44:43 simmim_pretrain](main_simmim.py 218): INFO Train: [136/200][2000/6787] eta 0:20:20 lr 0.000200 time 0.2600 (0.2550) loss 0.3680 (0.3588) grad_norm 312565.5625 (252054.2500) mem 14543MB +[2023-10-12 19:46:53 simmim_pretrain](main_simmim.py 218): INFO Train: [136/200][2500/6787] eta 0:18:16 lr 0.000200 time 0.2602 (0.2559) loss 0.3583 (0.3582) grad_norm 245105.7812 (282024.2188) mem 14543MB +[2023-10-12 19:49:02 simmim_pretrain](main_simmim.py 218): INFO Train: [136/200][3000/6787] eta 0:16:11 lr 0.000200 time 0.2604 (0.2564) loss 0.3767 (0.3581) grad_norm 230134.0000 (inf) mem 14543MB +[2023-10-12 19:51:12 simmim_pretrain](main_simmim.py 218): INFO Train: [136/200][3500/6787] eta 0:14:04 lr 0.000200 time 0.2601 (0.2569) loss 0.3369 (0.3582) grad_norm 246846.2500 (inf) mem 14543MB +[2023-10-12 19:53:22 simmim_pretrain](main_simmim.py 218): INFO Train: [136/200][4000/6787] eta 0:11:56 lr 0.000200 time 0.2597 (0.2572) loss 0.3494 (0.3582) grad_norm 186701.0312 (inf) mem 14543MB +[2023-10-12 19:55:32 simmim_pretrain](main_simmim.py 218): INFO Train: [136/200][4500/6787] eta 0:09:48 lr 0.000200 time 0.2611 (0.2574) loss 0.3485 (0.3583) grad_norm 285443.7812 (inf) mem 14543MB +[2023-10-12 19:57:41 simmim_pretrain](main_simmim.py 218): INFO Train: [136/200][5000/6787] eta 0:07:40 lr 0.000200 time 0.2606 (0.2576) loss 0.3421 (0.3583) grad_norm 302235.7500 (inf) mem 14543MB +[2023-10-12 19:59:51 simmim_pretrain](main_simmim.py 218): INFO Train: [136/200][5500/6787] eta 
0:05:31 lr 0.000200 time 0.2600 (0.2578) loss 0.3415 (0.3582) grad_norm 318666.1562 (inf) mem 14543MB +[2023-10-12 20:02:01 simmim_pretrain](main_simmim.py 218): INFO Train: [136/200][6000/6787] eta 0:03:22 lr 0.000200 time 0.2593 (0.2579) loss 0.3480 (0.3582) grad_norm 521203.0312 (inf) mem 14543MB +[2023-10-12 20:04:10 simmim_pretrain](main_simmim.py 218): INFO Train: [136/200][6500/6787] eta 0:01:14 lr 0.000200 time 0.2576 (0.2580) loss 0.3527 (0.3581) grad_norm 325966.9375 (inf) mem 14543MB +[2023-10-12 20:05:25 simmim_pretrain](main_simmim.py 228): INFO EPOCH 136 training takes 0:29:12 +[2023-10-12 20:05:27 simmim_pretrain](main_simmim.py 218): INFO Train: [137/200][0/6787] eta 2:44:03 lr 0.000200 time 1.4503 (1.4503) loss 0.3651 (0.3651) grad_norm 582980.5000 (582980.5000) mem 14543MB +[2023-10-12 20:07:34 simmim_pretrain](main_simmim.py 218): INFO Train: [137/200][500/6787] eta 0:26:56 lr 0.000200 time 0.2566 (0.2572) loss 0.3694 (0.3567) grad_norm 660918.7500 (451245.0625) mem 14543MB +[2023-10-12 20:09:44 simmim_pretrain](main_simmim.py 218): INFO Train: [137/200][1000/6787] eta 0:24:55 lr 0.000200 time 0.2541 (0.2585) loss 0.3670 (0.3567) grad_norm 429719.5625 (inf) mem 14543MB +[2023-10-12 20:11:54 simmim_pretrain](main_simmim.py 218): INFO Train: [137/200][1500/6787] eta 0:22:47 lr 0.000200 time 0.2586 (0.2587) loss 0.3533 (0.3577) grad_norm 226942.7812 (inf) mem 14543MB +[2023-10-12 20:14:03 simmim_pretrain](main_simmim.py 218): INFO Train: [137/200][2000/6787] eta 0:20:37 lr 0.000200 time 0.2582 (0.2586) loss 0.3430 (0.3578) grad_norm 114029.3672 (inf) mem 14543MB +[2023-10-12 20:16:12 simmim_pretrain](main_simmim.py 218): INFO Train: [137/200][2500/6787] eta 0:18:28 lr 0.000200 time 0.2582 (0.2585) loss 0.3340 (0.3581) grad_norm 342805.9062 (inf) mem 14543MB +[2023-10-12 20:18:21 simmim_pretrain](main_simmim.py 218): INFO Train: [137/200][3000/6787] eta 0:16:18 lr 0.000200 time 0.2587 (0.2585) loss 0.3495 (0.3582) grad_norm 198535.3906 (inf) mem 14543MB +[2023-10-12 20:20:30 simmim_pretrain](main_simmim.py 218): INFO Train: [137/200][3500/6787] eta 0:14:09 lr 0.000200 time 0.2578 (0.2584) loss 0.3737 (0.3580) grad_norm 348686.5312 (inf) mem 14543MB +[2023-10-12 20:22:39 simmim_pretrain](main_simmim.py 218): INFO Train: [137/200][4000/6787] eta 0:12:00 lr 0.000200 time 0.2588 (0.2584) loss 0.3506 (0.3580) grad_norm 167342.8281 (inf) mem 14543MB +[2023-10-12 20:24:48 simmim_pretrain](main_simmim.py 218): INFO Train: [137/200][4500/6787] eta 0:09:50 lr 0.000200 time 0.2593 (0.2584) loss 0.3516 (0.3582) grad_norm 280914.9375 (inf) mem 14543MB +[2023-10-12 20:26:57 simmim_pretrain](main_simmim.py 218): INFO Train: [137/200][5000/6787] eta 0:07:41 lr 0.000200 time 0.2575 (0.2583) loss 0.3552 (0.3584) grad_norm 261513.7500 (inf) mem 14543MB +[2023-10-12 20:29:06 simmim_pretrain](main_simmim.py 218): INFO Train: [137/200][5500/6787] eta 0:05:32 lr 0.000200 time 0.2569 (0.2582) loss 0.3259 (0.3584) grad_norm 236525.0312 (inf) mem 14543MB +[2023-10-12 20:31:15 simmim_pretrain](main_simmim.py 218): INFO Train: [137/200][6000/6787] eta 0:03:23 lr 0.000200 time 0.2579 (0.2582) loss 0.3737 (0.3585) grad_norm 262438.4062 (inf) mem 14543MB +[2023-10-12 20:33:24 simmim_pretrain](main_simmim.py 218): INFO Train: [137/200][6500/6787] eta 0:01:14 lr 0.000200 time 0.2574 (0.2582) loss 0.3743 (0.3583) grad_norm 383274.7188 (inf) mem 14543MB +[2023-10-12 20:34:38 simmim_pretrain](main_simmim.py 228): INFO EPOCH 137 training takes 0:29:13 +[2023-10-12 20:34:40 simmim_pretrain](main_simmim.py 218): 
INFO Train: [138/200][0/6787] eta 2:53:26 lr 0.000200 time 1.5333 (1.5333) loss 0.3688 (0.3688) grad_norm 423714.8438 (423714.8438) mem 14543MB +[2023-10-12 20:36:46 simmim_pretrain](main_simmim.py 218): INFO Train: [138/200][500/6787] eta 0:26:46 lr 0.000200 time 0.2546 (0.2555) loss 0.3594 (0.3572) grad_norm 356931.8125 (inf) mem 14543MB +[2023-10-12 20:38:52 simmim_pretrain](main_simmim.py 218): INFO Train: [138/200][1000/6787] eta 0:24:28 lr 0.000200 time 0.2536 (0.2537) loss 0.3698 (0.3579) grad_norm 201832.4062 (inf) mem 14543MB +[2023-10-12 20:40:58 simmim_pretrain](main_simmim.py 218): INFO Train: [138/200][1500/6787] eta 0:22:18 lr 0.000200 time 0.2507 (0.2531) loss 0.3575 (0.3582) grad_norm 335909.4375 (inf) mem 14543MB +[2023-10-12 20:43:05 simmim_pretrain](main_simmim.py 218): INFO Train: [138/200][2000/6787] eta 0:20:11 lr 0.000200 time 0.2508 (0.2530) loss 0.3636 (0.3583) grad_norm 392523.2812 (inf) mem 14543MB +[2023-10-12 20:45:11 simmim_pretrain](main_simmim.py 218): INFO Train: [138/200][2500/6787] eta 0:18:04 lr 0.000200 time 0.2519 (0.2530) loss 0.3639 (0.3591) grad_norm 117083.3594 (nan) mem 14543MB +[2023-10-12 20:47:18 simmim_pretrain](main_simmim.py 218): INFO Train: [138/200][3000/6787] eta 0:15:58 lr 0.000200 time 0.2488 (0.2531) loss 0.3443 (0.3598) grad_norm 98810.0312 (nan) mem 14543MB +[2023-10-12 20:49:25 simmim_pretrain](main_simmim.py 218): INFO Train: [138/200][3500/6787] eta 0:13:52 lr 0.000200 time 0.2534 (0.2532) loss 0.3387 (0.3599) grad_norm 131022.7344 (nan) mem 14543MB +[2023-10-12 20:51:31 simmim_pretrain](main_simmim.py 218): INFO Train: [138/200][4000/6787] eta 0:11:45 lr 0.000200 time 0.2502 (0.2532) loss 0.3445 (0.3601) grad_norm 184089.5625 (nan) mem 14543MB +[2023-10-12 20:53:38 simmim_pretrain](main_simmim.py 218): INFO Train: [138/200][4500/6787] eta 0:09:39 lr 0.000200 time 0.2529 (0.2532) loss 0.3495 (0.3600) grad_norm 177251.2500 (nan) mem 14543MB +[2023-10-12 20:55:45 simmim_pretrain](main_simmim.py 218): INFO Train: [138/200][5000/6787] eta 0:07:32 lr 0.000200 time 0.2519 (0.2532) loss 0.3626 (0.3599) grad_norm 149186.5156 (nan) mem 14543MB +[2023-10-12 20:57:52 simmim_pretrain](main_simmim.py 218): INFO Train: [138/200][5500/6787] eta 0:05:26 lr 0.000200 time 0.2509 (0.2534) loss 0.3609 (0.3597) grad_norm 278803.7500 (nan) mem 14543MB +[2023-10-12 21:00:02 simmim_pretrain](main_simmim.py 218): INFO Train: [138/200][6000/6787] eta 0:03:19 lr 0.000200 time 0.2610 (0.2539) loss 0.3561 (0.3597) grad_norm 409670.2812 (nan) mem 14543MB +[2023-10-12 21:02:11 simmim_pretrain](main_simmim.py 218): INFO Train: [138/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2540 (0.2541) loss 0.3453 (0.3595) grad_norm 335395.8125 (nan) mem 14543MB +[2023-10-12 21:03:24 simmim_pretrain](main_simmim.py 228): INFO EPOCH 138 training takes 0:28:45 +[2023-10-12 21:03:26 simmim_pretrain](main_simmim.py 218): INFO Train: [139/200][0/6787] eta 2:46:39 lr 0.000200 time 1.4733 (1.4733) loss 0.3603 (0.3603) grad_norm 736024.6250 (736024.6250) mem 14543MB +[2023-10-12 21:05:32 simmim_pretrain](main_simmim.py 218): INFO Train: [139/200][500/6787] eta 0:26:39 lr 0.000200 time 0.2537 (0.2544) loss 0.3327 (0.3573) grad_norm 310923.6250 (439042.5625) mem 14543MB +[2023-10-12 21:07:38 simmim_pretrain](main_simmim.py 218): INFO Train: [139/200][1000/6787] eta 0:24:25 lr 0.000200 time 0.2494 (0.2533) loss 0.3654 (0.3576) grad_norm 322528.1875 (412265.4375) mem 14543MB +[2023-10-12 21:09:44 simmim_pretrain](main_simmim.py 218): INFO Train: [139/200][1500/6787] eta 0:22:16 lr 
0.000200 time 0.2509 (0.2528) loss 0.3567 (0.3575) grad_norm 560631.5000 (inf) mem 14543MB +[2023-10-12 21:11:50 simmim_pretrain](main_simmim.py 218): INFO Train: [139/200][2000/6787] eta 0:20:09 lr 0.000200 time 0.2499 (0.2527) loss 0.3597 (0.3575) grad_norm 237367.2812 (inf) mem 14543MB +[2023-10-12 21:13:57 simmim_pretrain](main_simmim.py 218): INFO Train: [139/200][2500/6787] eta 0:18:04 lr 0.000200 time 0.2570 (0.2529) loss 0.3483 (0.3573) grad_norm 364589.4688 (inf) mem 14543MB +[2023-10-12 21:16:03 simmim_pretrain](main_simmim.py 218): INFO Train: [139/200][3000/6787] eta 0:15:58 lr 0.000200 time 0.2557 (0.2530) loss 0.3291 (0.3573) grad_norm 521464.1562 (inf) mem 14543MB +[2023-10-12 21:18:10 simmim_pretrain](main_simmim.py 218): INFO Train: [139/200][3500/6787] eta 0:13:51 lr 0.000200 time 0.2513 (0.2530) loss 0.3619 (0.3571) grad_norm 638485.6875 (inf) mem 14543MB +[2023-10-12 21:20:17 simmim_pretrain](main_simmim.py 218): INFO Train: [139/200][4000/6787] eta 0:11:45 lr 0.000200 time 0.2657 (0.2531) loss 0.3526 (0.3572) grad_norm 305279.9688 (inf) mem 14543MB +[2023-10-12 21:22:23 simmim_pretrain](main_simmim.py 218): INFO Train: [139/200][4500/6787] eta 0:09:38 lr 0.000200 time 0.2503 (0.2531) loss 0.3542 (0.3574) grad_norm 303506.0625 (inf) mem 14543MB +[2023-10-12 21:24:31 simmim_pretrain](main_simmim.py 218): INFO Train: [139/200][5000/6787] eta 0:07:32 lr 0.000200 time 0.2553 (0.2532) loss 0.3674 (0.3576) grad_norm 318532.4688 (inf) mem 14543MB +[2023-10-12 21:26:38 simmim_pretrain](main_simmim.py 218): INFO Train: [139/200][5500/6787] eta 0:05:26 lr 0.000200 time 0.2555 (0.2534) loss 0.3693 (0.3577) grad_norm 354473.2500 (inf) mem 14543MB +[2023-10-12 21:28:45 simmim_pretrain](main_simmim.py 218): INFO Train: [139/200][6000/6787] eta 0:03:19 lr 0.000200 time 0.2523 (0.2535) loss 0.3707 (0.3576) grad_norm 181190.7031 (inf) mem 14543MB +[2023-10-12 21:30:53 simmim_pretrain](main_simmim.py 218): INFO Train: [139/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2554 (0.2536) loss 0.3820 (0.3577) grad_norm 196072.5469 (inf) mem 14543MB +[2023-10-12 21:32:06 simmim_pretrain](main_simmim.py 228): INFO EPOCH 139 training takes 0:28:41 +[2023-10-12 21:32:07 simmim_pretrain](main_simmim.py 218): INFO Train: [140/200][0/6787] eta 2:40:49 lr 0.000200 time 1.4217 (1.4217) loss 0.3671 (0.3671) grad_norm 157128.3750 (157128.3750) mem 14543MB +[2023-10-12 21:34:13 simmim_pretrain](main_simmim.py 218): INFO Train: [140/200][500/6787] eta 0:26:37 lr 0.000200 time 0.2471 (0.2542) loss 0.3690 (0.3567) grad_norm 192203.0625 (254308.3594) mem 14543MB +[2023-10-12 21:36:19 simmim_pretrain](main_simmim.py 218): INFO Train: [140/200][1000/6787] eta 0:24:25 lr 0.000200 time 0.2531 (0.2532) loss 0.3539 (0.3576) grad_norm 240057.7812 (254957.1875) mem 14543MB +[2023-10-12 21:38:25 simmim_pretrain](main_simmim.py 218): INFO Train: [140/200][1500/6787] eta 0:22:17 lr 0.000200 time 0.2517 (0.2530) loss 0.3649 (0.3578) grad_norm 353946.9062 (260324.5938) mem 14543MB +[2023-10-12 21:40:32 simmim_pretrain](main_simmim.py 218): INFO Train: [140/200][2000/6787] eta 0:20:11 lr 0.000200 time 0.2510 (0.2530) loss 0.3665 (0.3576) grad_norm 382478.8750 (287769.2500) mem 14543MB +[2023-10-12 21:42:39 simmim_pretrain](main_simmim.py 218): INFO Train: [140/200][2500/6787] eta 0:18:05 lr 0.000200 time 0.2489 (0.2532) loss 0.3504 (0.3575) grad_norm 515669.9375 (307327.9375) mem 14543MB +[2023-10-12 21:44:48 simmim_pretrain](main_simmim.py 218): INFO Train: [140/200][3000/6787] eta 0:16:02 lr 0.000200 time 0.2595 (0.2542) 
loss 0.3828 (0.3574) grad_norm 327587.2812 (327162.2812) mem 14543MB +[2023-10-12 21:46:58 simmim_pretrain](main_simmim.py 218): INFO Train: [140/200][3500/6787] eta 0:13:57 lr 0.000200 time 0.2578 (0.2549) loss 0.3503 (0.3573) grad_norm 603094.0000 (inf) mem 14543MB +[2023-10-12 21:49:07 simmim_pretrain](main_simmim.py 218): INFO Train: [140/200][4000/6787] eta 0:11:51 lr 0.000200 time 0.2594 (0.2554) loss 0.3414 (0.3574) grad_norm 283232.3750 (inf) mem 14543MB +[2023-10-12 21:51:17 simmim_pretrain](main_simmim.py 218): INFO Train: [140/200][4500/6787] eta 0:09:44 lr 0.000200 time 0.2598 (0.2558) loss 0.3740 (0.3576) grad_norm 196739.5625 (inf) mem 14543MB +[2023-10-12 21:53:26 simmim_pretrain](main_simmim.py 218): INFO Train: [140/200][5000/6787] eta 0:07:37 lr 0.000200 time 0.2593 (0.2561) loss 0.3762 (0.3578) grad_norm 139841.1562 (inf) mem 14543MB +[2023-10-12 21:55:36 simmim_pretrain](main_simmim.py 218): INFO Train: [140/200][5500/6787] eta 0:05:29 lr 0.000200 time 0.2584 (0.2564) loss 0.3438 (0.3578) grad_norm 201952.2031 (inf) mem 14543MB +[2023-10-12 21:57:46 simmim_pretrain](main_simmim.py 218): INFO Train: [140/200][6000/6787] eta 0:03:21 lr 0.000200 time 0.2593 (0.2566) loss 0.3781 (0.3579) grad_norm 180120.3438 (inf) mem 14543MB +[2023-10-12 21:59:55 simmim_pretrain](main_simmim.py 218): INFO Train: [140/200][6500/6787] eta 0:01:13 lr 0.000200 time 0.2543 (0.2568) loss 0.3643 (0.3578) grad_norm 331432.2188 (inf) mem 14543MB +[2023-10-12 22:01:10 simmim_pretrain](main_simmim.py 228): INFO EPOCH 140 training takes 0:29:04 +[2023-10-12 22:01:10 simmim_pretrain](utils.py 62): INFO /root/autodl-tmp/LSQ-simmim/checkpoint/simmim_pretrain/vit_simmim/ckpt_epoch_140.pth saving...... +[2023-10-12 22:01:10 simmim_pretrain](utils.py 64): INFO /root/autodl-tmp/LSQ-simmim/checkpoint/simmim_pretrain/vit_simmim/ckpt_epoch_140.pth saved !!! 
+[2023-10-12 22:01:12 simmim_pretrain](main_simmim.py 218): INFO Train: [141/200][0/6787] eta 2:29:52 lr 0.000200 time 1.3249 (1.3249) loss 0.3513 (0.3513) grad_norm 452105.2188 (452105.2188) mem 14543MB +[2023-10-12 22:03:17 simmim_pretrain](main_simmim.py 218): INFO Train: [141/200][500/6787] eta 0:26:34 lr 0.000200 time 0.2544 (0.2536) loss 0.3574 (0.3564) grad_norm 383170.1250 (436252.9375) mem 14543MB +[2023-10-12 22:05:24 simmim_pretrain](main_simmim.py 218): INFO Train: [141/200][1000/6787] eta 0:24:24 lr 0.000200 time 0.2506 (0.2530) loss 0.3442 (0.3569) grad_norm 657144.2500 (inf) mem 14543MB +[2023-10-12 22:07:31 simmim_pretrain](main_simmim.py 218): INFO Train: [141/200][1500/6787] eta 0:22:19 lr 0.000200 time 0.2561 (0.2534) loss 0.3572 (0.3573) grad_norm 566559.1875 (inf) mem 14543MB +[2023-10-12 22:09:38 simmim_pretrain](main_simmim.py 218): INFO Train: [141/200][2000/6787] eta 0:20:14 lr 0.000200 time 0.2530 (0.2536) loss 0.3593 (0.3575) grad_norm 276387.7188 (inf) mem 14543MB +[2023-10-12 22:11:45 simmim_pretrain](main_simmim.py 218): INFO Train: [141/200][2500/6787] eta 0:18:07 lr 0.000200 time 0.2483 (0.2536) loss 0.5117 (0.3687) grad_norm 11041.0029 (inf) mem 14543MB +[2023-10-12 22:13:51 simmim_pretrain](main_simmim.py 218): INFO Train: [141/200][3000/6787] eta 0:15:59 lr 0.000200 time 0.2501 (0.2535) loss 0.4524 (0.3900) grad_norm 46830.1914 (inf) mem 14543MB +[2023-10-12 22:15:57 simmim_pretrain](main_simmim.py 218): INFO Train: [141/200][3500/6787] eta 0:13:52 lr 0.000200 time 0.2474 (0.2533) loss 0.3939 (0.3922) grad_norm 19839.7910 (inf) mem 14543MB +[2023-10-12 22:18:03 simmim_pretrain](main_simmim.py 218): INFO Train: [141/200][4000/6787] eta 0:11:45 lr 0.000200 time 0.2490 (0.2532) loss 0.3741 (0.3896) grad_norm 43657.7344 (inf) mem 14543MB +[2023-10-12 22:20:10 simmim_pretrain](main_simmim.py 218): INFO Train: [141/200][4500/6787] eta 0:09:39 lr 0.000200 time 0.2528 (0.2532) loss 0.3706 (0.3872) grad_norm 31653.1074 (inf) mem 14543MB +[2023-10-12 22:22:17 simmim_pretrain](main_simmim.py 218): INFO Train: [141/200][5000/6787] eta 0:07:32 lr 0.000200 time 0.2465 (0.2533) loss 0.3519 (0.3848) grad_norm 17131.6387 (inf) mem 14543MB +[2023-10-12 22:24:24 simmim_pretrain](main_simmim.py 218): INFO Train: [141/200][5500/6787] eta 0:05:26 lr 0.000200 time 0.2509 (0.2534) loss 0.3499 (0.3827) grad_norm 70889.2344 (inf) mem 14543MB +[2023-10-12 22:26:31 simmim_pretrain](main_simmim.py 218): INFO Train: [141/200][6000/6787] eta 0:03:19 lr 0.000200 time 0.2538 (0.2534) loss 0.3450 (0.3811) grad_norm 66766.6719 (inf) mem 14543MB +[2023-10-12 22:28:37 simmim_pretrain](main_simmim.py 218): INFO Train: [141/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2506 (0.2533) loss 0.3536 (0.3795) grad_norm 75082.8594 (inf) mem 14543MB +[2023-10-12 22:29:50 simmim_pretrain](main_simmim.py 228): INFO EPOCH 141 training takes 0:28:39 +[2023-10-12 22:29:51 simmim_pretrain](main_simmim.py 218): INFO Train: [142/200][0/6787] eta 2:48:04 lr 0.000200 time 1.4858 (1.4858) loss 0.3555 (0.3555) grad_norm 75042.5000 (75042.5000) mem 14543MB +[2023-10-12 22:31:57 simmim_pretrain](main_simmim.py 218): INFO Train: [142/200][500/6787] eta 0:26:41 lr 0.000200 time 0.2517 (0.2547) loss 0.3514 (0.3607) grad_norm 86339.3516 (78284.5312) mem 14543MB +[2023-10-12 22:34:04 simmim_pretrain](main_simmim.py 218): INFO Train: [142/200][1000/6787] eta 0:24:29 lr 0.000200 time 0.2458 (0.2539) loss 0.3757 (0.3603) grad_norm 103156.4844 (83900.3047) mem 14543MB +[2023-10-12 22:36:11 simmim_pretrain](main_simmim.py 
218): INFO Train: [142/200][1500/6787] eta 0:22:21 lr 0.000200 time 0.2519 (0.2538) loss 0.3639 (0.3598) grad_norm 130637.8750 (88242.4922) mem 14543MB +[2023-10-12 22:38:18 simmim_pretrain](main_simmim.py 218): INFO Train: [142/200][2000/6787] eta 0:20:15 lr 0.000200 time 0.2516 (0.2539) loss 0.3417 (0.3596) grad_norm 117280.4922 (99600.8750) mem 14543MB +[2023-10-12 22:40:25 simmim_pretrain](main_simmim.py 218): INFO Train: [142/200][2500/6787] eta 0:18:08 lr 0.000200 time 0.2543 (0.2539) loss 0.3656 (0.3595) grad_norm 88122.8984 (111333.5781) mem 14543MB +[2023-10-12 22:42:32 simmim_pretrain](main_simmim.py 218): INFO Train: [142/200][3000/6787] eta 0:16:01 lr 0.000200 time 0.2557 (0.2538) loss 0.3608 (0.3592) grad_norm 88883.7188 (119312.1406) mem 14543MB +[2023-10-12 22:44:38 simmim_pretrain](main_simmim.py 218): INFO Train: [142/200][3500/6787] eta 0:13:54 lr 0.000200 time 0.2463 (0.2538) loss 0.3463 (0.3590) grad_norm 202045.5469 (128418.7656) mem 14543MB +[2023-10-12 22:46:45 simmim_pretrain](main_simmim.py 218): INFO Train: [142/200][4000/6787] eta 0:11:47 lr 0.000200 time 0.2467 (0.2537) loss 0.3541 (0.3588) grad_norm 226299.7969 (inf) mem 14543MB +[2023-10-12 22:48:52 simmim_pretrain](main_simmim.py 218): INFO Train: [142/200][4500/6787] eta 0:09:40 lr 0.000200 time 0.2568 (0.2537) loss 0.3487 (0.3588) grad_norm 152624.7188 (inf) mem 14543MB +[2023-10-12 22:50:59 simmim_pretrain](main_simmim.py 218): INFO Train: [142/200][5000/6787] eta 0:07:33 lr 0.000200 time 0.2508 (0.2537) loss 0.3639 (0.3586) grad_norm 95081.8828 (inf) mem 14543MB +[2023-10-12 22:53:05 simmim_pretrain](main_simmim.py 218): INFO Train: [142/200][5500/6787] eta 0:05:26 lr 0.000200 time 0.2568 (0.2536) loss 0.3787 (0.3586) grad_norm 107240.5078 (inf) mem 14543MB +[2023-10-12 22:55:12 simmim_pretrain](main_simmim.py 218): INFO Train: [142/200][6000/6787] eta 0:03:19 lr 0.000200 time 0.2579 (0.2536) loss 0.3574 (0.3586) grad_norm 327172.4375 (inf) mem 14543MB +[2023-10-12 22:57:18 simmim_pretrain](main_simmim.py 218): INFO Train: [142/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2532 (0.2535) loss 0.3595 (0.3585) grad_norm 257903.5781 (inf) mem 14543MB +[2023-10-12 22:58:31 simmim_pretrain](main_simmim.py 228): INFO EPOCH 142 training takes 0:28:41 +[2023-10-12 22:58:33 simmim_pretrain](main_simmim.py 218): INFO Train: [143/200][0/6787] eta 2:40:52 lr 0.000200 time 1.4222 (1.4222) loss 0.3443 (0.3443) grad_norm 464211.7500 (464211.7500) mem 14543MB +[2023-10-12 23:00:39 simmim_pretrain](main_simmim.py 218): INFO Train: [143/200][500/6787] eta 0:26:46 lr 0.000200 time 0.2545 (0.2555) loss 0.3399 (0.3560) grad_norm 304563.1875 (465708.3438) mem 14543MB +[2023-10-12 23:02:46 simmim_pretrain](main_simmim.py 218): INFO Train: [143/200][1000/6787] eta 0:24:36 lr 0.000200 time 0.2539 (0.2551) loss 0.3509 (0.3561) grad_norm 548714.6875 (inf) mem 14543MB +[2023-10-12 23:04:54 simmim_pretrain](main_simmim.py 218): INFO Train: [143/200][1500/6787] eta 0:22:29 lr 0.000200 time 0.2546 (0.2552) loss 0.3524 (0.3564) grad_norm 382324.8438 (inf) mem 14543MB +[2023-10-12 23:07:02 simmim_pretrain](main_simmim.py 218): INFO Train: [143/200][2000/6787] eta 0:20:21 lr 0.000200 time 0.2548 (0.2552) loss 0.3582 (0.3566) grad_norm 320679.6562 (inf) mem 14543MB +[2023-10-12 23:09:09 simmim_pretrain](main_simmim.py 218): INFO Train: [143/200][2500/6787] eta 0:18:13 lr 0.000200 time 0.2533 (0.2550) loss 0.3501 (0.3564) grad_norm 537145.4375 (inf) mem 14543MB +[2023-10-12 23:11:16 simmim_pretrain](main_simmim.py 218): INFO Train: 
[143/200][3000/6787] eta 0:16:05 lr 0.000200 time 0.2534 (0.2550) loss 0.3806 (0.3565) grad_norm 490226.4375 (inf) mem 14543MB +[2023-10-12 23:13:24 simmim_pretrain](main_simmim.py 218): INFO Train: [143/200][3500/6787] eta 0:13:58 lr 0.000200 time 0.2564 (0.2550) loss 0.3742 (0.3567) grad_norm 154836.9844 (inf) mem 14543MB +[2023-10-12 23:15:31 simmim_pretrain](main_simmim.py 218): INFO Train: [143/200][4000/6787] eta 0:11:50 lr 0.000200 time 0.2526 (0.2550) loss 0.3631 (0.3570) grad_norm 210106.0469 (inf) mem 14543MB +[2023-10-12 23:17:39 simmim_pretrain](main_simmim.py 218): INFO Train: [143/200][4500/6787] eta 0:09:43 lr 0.000200 time 0.2539 (0.2550) loss 0.3453 (0.3571) grad_norm 184100.8281 (inf) mem 14543MB +[2023-10-12 23:19:47 simmim_pretrain](main_simmim.py 218): INFO Train: [143/200][5000/6787] eta 0:07:35 lr 0.000200 time 0.2538 (0.2550) loss 0.3572 (0.3572) grad_norm 161449.7344 (inf) mem 14543MB +[2023-10-12 23:21:54 simmim_pretrain](main_simmim.py 218): INFO Train: [143/200][5500/6787] eta 0:05:28 lr 0.000200 time 0.2527 (0.2551) loss 0.3663 (0.3573) grad_norm 146782.6094 (inf) mem 14543MB +[2023-10-12 23:24:02 simmim_pretrain](main_simmim.py 218): INFO Train: [143/200][6000/6787] eta 0:03:20 lr 0.000200 time 0.2594 (0.2551) loss 0.3601 (0.3573) grad_norm 251937.4844 (inf) mem 14543MB +[2023-10-12 23:26:10 simmim_pretrain](main_simmim.py 218): INFO Train: [143/200][6500/6787] eta 0:01:13 lr 0.000200 time 0.2529 (0.2551) loss 0.3617 (0.3572) grad_norm 499877.8125 (inf) mem 14543MB +[2023-10-12 23:27:24 simmim_pretrain](main_simmim.py 228): INFO EPOCH 143 training takes 0:28:52 +[2023-10-12 23:27:25 simmim_pretrain](main_simmim.py 218): INFO Train: [144/200][0/6787] eta 2:55:46 lr 0.000200 time 1.5539 (1.5539) loss 0.3760 (0.3760) grad_norm 382667.5312 (382667.5312) mem 14543MB +[2023-10-12 23:29:32 simmim_pretrain](main_simmim.py 218): INFO Train: [144/200][500/6787] eta 0:26:53 lr 0.000200 time 0.2544 (0.2566) loss 0.3574 (0.3577) grad_norm 399868.3438 (359994.4375) mem 14543MB +[2023-10-12 23:31:39 simmim_pretrain](main_simmim.py 218): INFO Train: [144/200][1000/6787] eta 0:24:38 lr 0.000200 time 0.2528 (0.2554) loss 0.3389 (0.3571) grad_norm 420865.2188 (inf) mem 14543MB +[2023-10-12 23:33:46 simmim_pretrain](main_simmim.py 218): INFO Train: [144/200][1500/6787] eta 0:22:26 lr 0.000200 time 0.2531 (0.2547) loss 0.3485 (0.3570) grad_norm 265816.0312 (inf) mem 14543MB +[2023-10-12 23:35:52 simmim_pretrain](main_simmim.py 218): INFO Train: [144/200][2000/6787] eta 0:20:15 lr 0.000200 time 0.2489 (0.2540) loss 0.3503 (0.3573) grad_norm 259058.4688 (inf) mem 14543MB +[2023-10-12 23:37:58 simmim_pretrain](main_simmim.py 218): INFO Train: [144/200][2500/6787] eta 0:18:07 lr 0.000200 time 0.2507 (0.2536) loss 0.3535 (0.3578) grad_norm 268508.8438 (inf) mem 14543MB +[2023-10-12 23:40:04 simmim_pretrain](main_simmim.py 218): INFO Train: [144/200][3000/6787] eta 0:15:59 lr 0.000200 time 0.2594 (0.2533) loss 0.3469 (0.3579) grad_norm 97643.1797 (inf) mem 14543MB +[2023-10-12 23:42:10 simmim_pretrain](main_simmim.py 218): INFO Train: [144/200][3500/6787] eta 0:13:52 lr 0.000200 time 0.2553 (0.2533) loss 0.3552 (0.3580) grad_norm 293765.2500 (inf) mem 14543MB +[2023-10-12 23:44:18 simmim_pretrain](main_simmim.py 218): INFO Train: [144/200][4000/6787] eta 0:11:46 lr 0.000200 time 0.2609 (0.2537) loss 0.3665 (0.3579) grad_norm 580131.0000 (inf) mem 14543MB +[2023-10-12 23:46:29 simmim_pretrain](main_simmim.py 218): INFO Train: [144/200][4500/6787] eta 0:09:41 lr 0.000200 time 0.2608 
(0.2544) loss 0.3280 (0.3577) grad_norm 352433.3750 (inf) mem 14543MB +[2023-10-12 23:48:39 simmim_pretrain](main_simmim.py 218): INFO Train: [144/200][5000/6787] eta 0:07:35 lr 0.000200 time 0.2608 (0.2550) loss 0.3488 (0.3577) grad_norm 380837.4375 (inf) mem 14543MB +[2023-10-12 23:50:49 simmim_pretrain](main_simmim.py 218): INFO Train: [144/200][5500/6787] eta 0:05:28 lr 0.000200 time 0.2605 (0.2554) loss 0.3520 (0.3576) grad_norm 281264.0625 (inf) mem 14543MB +[2023-10-12 23:52:59 simmim_pretrain](main_simmim.py 218): INFO Train: [144/200][6000/6787] eta 0:03:21 lr 0.000200 time 0.2610 (0.2558) loss 0.3663 (0.3576) grad_norm 465062.7812 (inf) mem 14543MB +[2023-10-12 23:55:08 simmim_pretrain](main_simmim.py 218): INFO Train: [144/200][6500/6787] eta 0:01:13 lr 0.000200 time 0.2609 (0.2561) loss 0.3539 (0.3575) grad_norm 332526.3125 (inf) mem 14543MB +[2023-10-12 23:56:24 simmim_pretrain](main_simmim.py 228): INFO EPOCH 144 training takes 0:29:00 +[2023-10-12 23:56:25 simmim_pretrain](main_simmim.py 218): INFO Train: [145/200][0/6787] eta 2:31:31 lr 0.000200 time 1.3395 (1.3395) loss 0.3585 (0.3585) grad_norm 509315.8750 (509315.8750) mem 14543MB +[2023-10-12 23:58:32 simmim_pretrain](main_simmim.py 218): INFO Train: [145/200][500/6787] eta 0:26:50 lr 0.000200 time 0.2547 (0.2562) loss 0.3538 (0.3584) grad_norm 287250.8125 (284358.6250) mem 14543MB +[2023-10-13 00:00:39 simmim_pretrain](main_simmim.py 218): INFO Train: [145/200][1000/6787] eta 0:24:36 lr 0.000200 time 0.2563 (0.2552) loss 0.3738 (0.3582) grad_norm 237128.4688 (263038.1250) mem 14543MB +[2023-10-13 00:02:46 simmim_pretrain](main_simmim.py 218): INFO Train: [145/200][1500/6787] eta 0:22:26 lr 0.000200 time 0.2501 (0.2547) loss 0.3392 (0.3583) grad_norm 278416.2812 (254640.7812) mem 14543MB +[2023-10-13 00:04:52 simmim_pretrain](main_simmim.py 218): INFO Train: [145/200][2000/6787] eta 0:20:16 lr 0.000200 time 0.2503 (0.2542) loss 0.3660 (0.3582) grad_norm 219032.8281 (250165.9375) mem 14543MB +[2023-10-13 00:07:00 simmim_pretrain](main_simmim.py 218): INFO Train: [145/200][2500/6787] eta 0:18:10 lr 0.000200 time 0.2539 (0.2543) loss 0.3536 (0.3581) grad_norm 386238.7500 (259889.0312) mem 14543MB +[2023-10-13 00:09:08 simmim_pretrain](main_simmim.py 218): INFO Train: [145/200][3000/6787] eta 0:16:04 lr 0.000200 time 0.2578 (0.2548) loss 0.3443 (0.3580) grad_norm 484961.1875 (272865.6875) mem 14543MB +[2023-10-13 00:11:17 simmim_pretrain](main_simmim.py 218): INFO Train: [145/200][3500/6787] eta 0:13:59 lr 0.000200 time 0.2583 (0.2553) loss 0.3552 (0.3579) grad_norm 369376.4062 (288776.8438) mem 14543MB +[2023-10-13 00:13:26 simmim_pretrain](main_simmim.py 218): INFO Train: [145/200][4000/6787] eta 0:11:51 lr 0.000200 time 0.2512 (0.2554) loss 0.3522 (0.3577) grad_norm 483793.6875 (305521.8750) mem 14543MB +[2023-10-13 00:15:33 simmim_pretrain](main_simmim.py 218): INFO Train: [145/200][4500/6787] eta 0:09:44 lr 0.000200 time 0.2519 (0.2554) loss 0.3724 (0.3575) grad_norm 238602.7344 (inf) mem 14543MB +[2023-10-13 00:17:40 simmim_pretrain](main_simmim.py 218): INFO Train: [145/200][5000/6787] eta 0:07:36 lr 0.000200 time 0.2550 (0.2552) loss 0.3570 (0.3575) grad_norm 505789.1250 (inf) mem 14543MB +[2023-10-13 00:19:46 simmim_pretrain](main_simmim.py 218): INFO Train: [145/200][5500/6787] eta 0:05:28 lr 0.000200 time 0.2560 (0.2550) loss 0.3412 (0.3575) grad_norm 592765.0625 (inf) mem 14543MB +[2023-10-13 00:21:53 simmim_pretrain](main_simmim.py 218): INFO Train: [145/200][6000/6787] eta 0:03:20 lr 0.000200 time 0.2507 
(0.2549) loss 0.3711 (0.3574) grad_norm 445956.4062 (inf) mem 14543MB +[2023-10-13 00:24:00 simmim_pretrain](main_simmim.py 218): INFO Train: [145/200][6500/6787] eta 0:01:13 lr 0.000200 time 0.2509 (0.2547) loss 0.3392 (0.3573) grad_norm 527869.8750 (inf) mem 14543MB +[2023-10-13 00:25:13 simmim_pretrain](main_simmim.py 228): INFO EPOCH 145 training takes 0:28:49 +[2023-10-13 00:25:14 simmim_pretrain](main_simmim.py 218): INFO Train: [146/200][0/6787] eta 2:36:26 lr 0.000200 time 1.3830 (1.3830) loss 0.3692 (0.3692) grad_norm 434800.2188 (434800.2188) mem 14543MB +[2023-10-13 00:27:21 simmim_pretrain](main_simmim.py 218): INFO Train: [146/200][500/6787] eta 0:26:43 lr 0.000200 time 0.2590 (0.2551) loss 0.3455 (0.3570) grad_norm 322820.2812 (inf) mem 14543MB +[2023-10-13 00:29:28 simmim_pretrain](main_simmim.py 218): INFO Train: [146/200][1000/6787] eta 0:24:34 lr 0.000200 time 0.2535 (0.2548) loss 0.3414 (0.3579) grad_norm 262320.1250 (inf) mem 14543MB +[2023-10-13 00:31:36 simmim_pretrain](main_simmim.py 218): INFO Train: [146/200][1500/6787] eta 0:22:29 lr 0.000200 time 0.2562 (0.2553) loss 0.3635 (0.3580) grad_norm 217056.8594 (inf) mem 14543MB +[2023-10-13 00:33:44 simmim_pretrain](main_simmim.py 218): INFO Train: [146/200][2000/6787] eta 0:20:22 lr 0.000200 time 0.2578 (0.2555) loss 0.3396 (0.3592) grad_norm 239126.0156 (inf) mem 14543MB +[2023-10-13 00:35:52 simmim_pretrain](main_simmim.py 218): INFO Train: [146/200][2500/6787] eta 0:18:15 lr 0.000200 time 0.2590 (0.2556) loss 0.3515 (0.3598) grad_norm 152799.5469 (inf) mem 14543MB +[2023-10-13 00:38:00 simmim_pretrain](main_simmim.py 218): INFO Train: [146/200][3000/6787] eta 0:16:07 lr 0.000200 time 0.2518 (0.2556) loss 0.3935 (0.3599) grad_norm 199040.6406 (inf) mem 14543MB +[2023-10-13 00:40:08 simmim_pretrain](main_simmim.py 218): INFO Train: [146/200][3500/6787] eta 0:13:59 lr 0.000200 time 0.2534 (0.2555) loss 0.3694 (0.3600) grad_norm 83091.8125 (inf) mem 14543MB +[2023-10-13 00:42:15 simmim_pretrain](main_simmim.py 218): INFO Train: [146/200][4000/6787] eta 0:11:51 lr 0.000200 time 0.2572 (0.2553) loss 0.3343 (0.3599) grad_norm 226849.3594 (inf) mem 14543MB +[2023-10-13 00:44:22 simmim_pretrain](main_simmim.py 218): INFO Train: [146/200][4500/6787] eta 0:09:43 lr 0.000200 time 0.2524 (0.2552) loss 0.3787 (0.3598) grad_norm 216481.7031 (inf) mem 14543MB +[2023-10-13 00:46:31 simmim_pretrain](main_simmim.py 218): INFO Train: [146/200][5000/6787] eta 0:07:36 lr 0.000200 time 0.2609 (0.2555) loss 0.3484 (0.3597) grad_norm 331799.2812 (inf) mem 14543MB +[2023-10-13 00:48:41 simmim_pretrain](main_simmim.py 218): INFO Train: [146/200][5500/6787] eta 0:05:29 lr 0.000200 time 0.2606 (0.2559) loss 0.3624 (0.3596) grad_norm 204233.0156 (inf) mem 14543MB +[2023-10-13 00:50:51 simmim_pretrain](main_simmim.py 218): INFO Train: [146/200][6000/6787] eta 0:03:21 lr 0.000200 time 0.2600 (0.2562) loss 0.3379 (0.3595) grad_norm 330938.0312 (inf) mem 14543MB +[2023-10-13 00:53:01 simmim_pretrain](main_simmim.py 218): INFO Train: [146/200][6500/6787] eta 0:01:13 lr 0.000200 time 0.2600 (0.2565) loss 0.3557 (0.3593) grad_norm 264080.6875 (inf) mem 14543MB +[2023-10-13 00:54:16 simmim_pretrain](main_simmim.py 228): INFO EPOCH 146 training takes 0:29:02 +[2023-10-13 00:54:17 simmim_pretrain](main_simmim.py 218): INFO Train: [147/200][0/6787] eta 2:47:20 lr 0.000200 time 1.4794 (1.4794) loss 0.3573 (0.3573) grad_norm 211475.4219 (211475.4219) mem 14543MB +[2023-10-13 00:56:23 simmim_pretrain](main_simmim.py 218): INFO Train: [147/200][500/6787] eta 
0:26:42 lr 0.000200 time 0.2493 (0.2550) loss 0.3644 (0.3586) grad_norm 169592.3281 (241175.1250) mem 14543MB +[2023-10-13 00:58:31 simmim_pretrain](main_simmim.py 218): INFO Train: [147/200][1000/6787] eta 0:24:39 lr 0.000200 time 0.2589 (0.2556) loss 0.3665 (0.3588) grad_norm 347051.6250 (238049.0312) mem 14543MB +[2023-10-13 01:00:40 simmim_pretrain](main_simmim.py 218): INFO Train: [147/200][1500/6787] eta 0:22:35 lr 0.000200 time 0.2580 (0.2564) loss 0.3511 (0.3585) grad_norm 104975.1953 (239609.1875) mem 14543MB +[2023-10-13 01:02:49 simmim_pretrain](main_simmim.py 218): INFO Train: [147/200][2000/6787] eta 0:20:29 lr 0.000200 time 0.2590 (0.2569) loss 0.3505 (0.3582) grad_norm 294382.8438 (253863.4062) mem 14543MB +[2023-10-13 01:04:57 simmim_pretrain](main_simmim.py 218): INFO Train: [147/200][2500/6787] eta 0:18:20 lr 0.000200 time 0.2559 (0.2566) loss 0.3512 (0.3579) grad_norm 201535.5469 (288262.2812) mem 14543MB +[2023-10-13 01:07:06 simmim_pretrain](main_simmim.py 218): INFO Train: [147/200][3000/6787] eta 0:16:11 lr 0.000200 time 0.2601 (0.2566) loss 0.3461 (0.3578) grad_norm 273209.3750 (303969.1875) mem 14543MB +[2023-10-13 01:09:14 simmim_pretrain](main_simmim.py 218): INFO Train: [147/200][3500/6787] eta 0:14:03 lr 0.000200 time 0.2607 (0.2567) loss 0.3483 (0.3577) grad_norm 423470.6875 (324463.2188) mem 14543MB +[2023-10-13 01:11:23 simmim_pretrain](main_simmim.py 218): INFO Train: [147/200][4000/6787] eta 0:11:55 lr 0.000200 time 0.2514 (0.2568) loss 0.3388 (0.3575) grad_norm 347963.3750 (inf) mem 14543MB +[2023-10-13 01:13:31 simmim_pretrain](main_simmim.py 218): INFO Train: [147/200][4500/6787] eta 0:09:47 lr 0.000200 time 0.2540 (0.2568) loss 0.3642 (0.3575) grad_norm 427354.7812 (inf) mem 14543MB +[2023-10-13 01:15:40 simmim_pretrain](main_simmim.py 218): INFO Train: [147/200][5000/6787] eta 0:07:38 lr 0.000200 time 0.2533 (0.2568) loss 0.3419 (0.3575) grad_norm 284245.1875 (inf) mem 14543MB +[2023-10-13 01:17:48 simmim_pretrain](main_simmim.py 218): INFO Train: [147/200][5500/6787] eta 0:05:30 lr 0.000200 time 0.2591 (0.2569) loss 0.3724 (0.3575) grad_norm 244469.0625 (inf) mem 14543MB +[2023-10-13 01:19:57 simmim_pretrain](main_simmim.py 218): INFO Train: [147/200][6000/6787] eta 0:03:22 lr 0.000200 time 0.2493 (0.2569) loss 0.3375 (0.3577) grad_norm 268601.1250 (inf) mem 14543MB +[2023-10-13 01:22:06 simmim_pretrain](main_simmim.py 218): INFO Train: [147/200][6500/6787] eta 0:01:13 lr 0.000200 time 0.3587 (0.2570) loss 0.3700 (0.3577) grad_norm 122887.5938 (inf) mem 14543MB +[2023-10-13 01:23:20 simmim_pretrain](main_simmim.py 228): INFO EPOCH 147 training takes 0:29:04 +[2023-10-13 01:23:22 simmim_pretrain](main_simmim.py 218): INFO Train: [148/200][0/6787] eta 2:46:19 lr 0.000200 time 1.4704 (1.4704) loss 0.3338 (0.3338) grad_norm 357979.3438 (357979.3438) mem 14543MB +[2023-10-13 01:25:31 simmim_pretrain](main_simmim.py 218): INFO Train: [148/200][500/6787] eta 0:27:19 lr 0.000200 time 0.2596 (0.2608) loss 0.3456 (0.3572) grad_norm 272372.8438 (292792.9688) mem 14543MB +[2023-10-13 01:27:40 simmim_pretrain](main_simmim.py 218): INFO Train: [148/200][1000/6787] eta 0:24:58 lr 0.000200 time 0.2570 (0.2589) loss 0.3517 (0.3571) grad_norm 393587.7188 (340455.0625) mem 14543MB +[2023-10-13 01:29:48 simmim_pretrain](main_simmim.py 218): INFO Train: [148/200][1500/6787] eta 0:22:44 lr 0.000200 time 0.2497 (0.2581) loss 0.3650 (0.3570) grad_norm 427314.7812 (355725.2188) mem 14543MB +[2023-10-13 01:31:56 simmim_pretrain](main_simmim.py 218): INFO Train: 
[148/200][2000/6787] eta 0:20:33 lr 0.000200 time 0.2544 (0.2576) loss 0.3522 (0.3575) grad_norm 302286.1250 (inf) mem 14543MB +[2023-10-13 01:34:04 simmim_pretrain](main_simmim.py 218): INFO Train: [148/200][2500/6787] eta 0:18:23 lr 0.000200 time 0.2574 (0.2575) loss 0.3650 (0.3576) grad_norm 244324.5469 (inf) mem 14543MB +[2023-10-13 01:36:13 simmim_pretrain](main_simmim.py 218): INFO Train: [148/200][3000/6787] eta 0:16:15 lr 0.000200 time 0.2611 (0.2575) loss 0.3645 (0.3578) grad_norm 247005.0469 (inf) mem 14543MB +[2023-10-13 01:38:22 simmim_pretrain](main_simmim.py 218): INFO Train: [148/200][3500/6787] eta 0:14:06 lr 0.000200 time 0.2580 (0.2574) loss 0.3812 (0.3579) grad_norm 253302.4375 (inf) mem 14543MB +[2023-10-13 01:40:30 simmim_pretrain](main_simmim.py 218): INFO Train: [148/200][4000/6787] eta 0:11:57 lr 0.000200 time 0.2500 (0.2574) loss 0.3407 (0.3579) grad_norm 297803.8438 (inf) mem 14543MB +[2023-10-13 01:42:39 simmim_pretrain](main_simmim.py 218): INFO Train: [148/200][4500/6787] eta 0:09:48 lr 0.000200 time 0.2545 (0.2574) loss 0.3601 (0.3578) grad_norm 430838.8750 (inf) mem 14543MB +[2023-10-13 01:44:48 simmim_pretrain](main_simmim.py 218): INFO Train: [148/200][5000/6787] eta 0:07:39 lr 0.000200 time 0.2530 (0.2574) loss 0.3491 (0.3577) grad_norm 333429.0312 (inf) mem 14543MB +[2023-10-13 01:46:56 simmim_pretrain](main_simmim.py 218): INFO Train: [148/200][5500/6787] eta 0:05:31 lr 0.000200 time 0.2458 (0.2573) loss 0.3570 (0.3576) grad_norm 301454.5625 (inf) mem 14543MB +[2023-10-13 01:49:04 simmim_pretrain](main_simmim.py 218): INFO Train: [148/200][6000/6787] eta 0:03:22 lr 0.000200 time 0.2539 (0.2573) loss 0.3670 (0.3576) grad_norm 555655.0000 (inf) mem 14543MB +[2023-10-13 01:51:13 simmim_pretrain](main_simmim.py 218): INFO Train: [148/200][6500/6787] eta 0:01:13 lr 0.000200 time 0.2547 (0.2573) loss 0.3523 (0.3577) grad_norm 292190.5312 (inf) mem 14543MB +[2023-10-13 01:52:27 simmim_pretrain](main_simmim.py 228): INFO EPOCH 148 training takes 0:29:06 +[2023-10-13 01:52:28 simmim_pretrain](main_simmim.py 218): INFO Train: [149/200][0/6787] eta 2:40:08 lr 0.000200 time 1.4157 (1.4157) loss 0.3588 (0.3588) grad_norm 403019.2812 (403019.2812) mem 14543MB +[2023-10-13 01:54:37 simmim_pretrain](main_simmim.py 218): INFO Train: [149/200][500/6787] eta 0:27:05 lr 0.000200 time 0.2530 (0.2586) loss 0.3609 (0.3600) grad_norm 267265.0938 (271737.1562) mem 14543MB +[2023-10-13 01:56:45 simmim_pretrain](main_simmim.py 218): INFO Train: [149/200][1000/6787] eta 0:24:52 lr 0.000200 time 0.2602 (0.2578) loss 0.3482 (0.3591) grad_norm 298225.5938 (267084.9688) mem 14543MB +[2023-10-13 01:58:54 simmim_pretrain](main_simmim.py 218): INFO Train: [149/200][1500/6787] eta 0:22:42 lr 0.000200 time 0.2572 (0.2577) loss 0.3622 (0.3593) grad_norm 224466.4375 (264735.8750) mem 14543MB +[2023-10-13 02:01:03 simmim_pretrain](main_simmim.py 218): INFO Train: [149/200][2000/6787] eta 0:20:33 lr 0.000200 time 0.2571 (0.2577) loss 0.3543 (0.3589) grad_norm 395137.3750 (278995.8750) mem 14543MB +[2023-10-13 02:03:11 simmim_pretrain](main_simmim.py 218): INFO Train: [149/200][2500/6787] eta 0:18:24 lr 0.000200 time 0.2559 (0.2576) loss 0.3610 (0.3583) grad_norm 357110.0000 (301106.0000) mem 14543MB +[2023-10-13 02:05:20 simmim_pretrain](main_simmim.py 218): INFO Train: [149/200][3000/6787] eta 0:16:15 lr 0.000200 time 0.2608 (0.2575) loss 0.3617 (0.3583) grad_norm 395157.3750 (inf) mem 14543MB +[2023-10-13 02:07:28 simmim_pretrain](main_simmim.py 218): INFO Train: [149/200][3500/6787] eta 
0:14:06 lr 0.000200 time 0.2594 (0.2574) loss 0.3586 (0.3583) grad_norm 382023.6875 (inf) mem 14543MB +[2023-10-13 02:09:37 simmim_pretrain](main_simmim.py 218): INFO Train: [149/200][4000/6787] eta 0:11:57 lr 0.000200 time 0.2587 (0.2573) loss 0.3418 (0.3584) grad_norm 241769.4062 (inf) mem 14543MB +[2023-10-13 02:11:45 simmim_pretrain](main_simmim.py 218): INFO Train: [149/200][4500/6787] eta 0:09:48 lr 0.000200 time 0.2569 (0.2573) loss 0.3659 (0.3585) grad_norm 384938.4688 (inf) mem 14543MB +[2023-10-13 02:13:54 simmim_pretrain](main_simmim.py 218): INFO Train: [149/200][5000/6787] eta 0:07:39 lr 0.000200 time 0.2503 (0.2573) loss 0.3620 (0.3584) grad_norm 189740.0469 (inf) mem 14543MB +[2023-10-13 02:16:03 simmim_pretrain](main_simmim.py 218): INFO Train: [149/200][5500/6787] eta 0:05:31 lr 0.000200 time 0.2563 (0.2573) loss 0.3393 (0.3583) grad_norm 446399.0312 (inf) mem 14543MB +[2023-10-13 02:18:11 simmim_pretrain](main_simmim.py 218): INFO Train: [149/200][6000/6787] eta 0:03:22 lr 0.000200 time 0.2507 (0.2573) loss 0.3549 (0.3582) grad_norm 570175.2500 (inf) mem 14543MB +[2023-10-13 02:20:20 simmim_pretrain](main_simmim.py 218): INFO Train: [149/200][6500/6787] eta 0:01:13 lr 0.000200 time 0.2587 (0.2574) loss 0.3556 (0.3580) grad_norm 495990.3125 (inf) mem 14543MB +[2023-10-13 02:21:35 simmim_pretrain](main_simmim.py 228): INFO EPOCH 149 training takes 0:29:07 +[2023-10-13 02:21:36 simmim_pretrain](main_simmim.py 218): INFO Train: [150/200][0/6787] eta 2:42:07 lr 0.000200 time 1.4332 (1.4332) loss 0.3699 (0.3699) grad_norm 353860.7500 (353860.7500) mem 14543MB +[2023-10-13 02:23:45 simmim_pretrain](main_simmim.py 218): INFO Train: [150/200][500/6787] eta 0:27:14 lr 0.000200 time 0.2555 (0.2599) loss 0.3398 (0.3568) grad_norm 455074.1562 (inf) mem 14543MB +[2023-10-13 02:25:53 simmim_pretrain](main_simmim.py 218): INFO Train: [150/200][1000/6787] eta 0:24:56 lr 0.000200 time 0.2596 (0.2585) loss 0.3575 (0.3577) grad_norm 196789.5312 (inf) mem 14543MB +[2023-10-13 02:28:02 simmim_pretrain](main_simmim.py 218): INFO Train: [150/200][1500/6787] eta 0:22:43 lr 0.000200 time 0.2540 (0.2579) loss 0.3870 (0.3583) grad_norm 336320.4688 (inf) mem 14543MB +[2023-10-13 02:30:10 simmim_pretrain](main_simmim.py 218): INFO Train: [150/200][2000/6787] eta 0:20:33 lr 0.000200 time 0.2529 (0.2576) loss 0.3658 (0.3583) grad_norm 150733.2344 (inf) mem 14543MB +[2023-10-13 02:32:19 simmim_pretrain](main_simmim.py 218): INFO Train: [150/200][2500/6787] eta 0:18:24 lr 0.000200 time 0.2597 (0.2576) loss 0.3405 (0.3584) grad_norm 273879.8125 (inf) mem 14543MB +[2023-10-13 02:34:28 simmim_pretrain](main_simmim.py 218): INFO Train: [150/200][3000/6787] eta 0:16:15 lr 0.000200 time 0.2562 (0.2576) loss 0.3691 (0.3583) grad_norm 507146.6562 (inf) mem 14543MB +[2023-10-13 02:36:37 simmim_pretrain](main_simmim.py 218): INFO Train: [150/200][3500/6787] eta 0:14:06 lr 0.000200 time 0.2575 (0.2576) loss 0.3571 (0.3583) grad_norm 379908.3750 (inf) mem 14543MB +[2023-10-13 02:38:45 simmim_pretrain](main_simmim.py 218): INFO Train: [150/200][4000/6787] eta 0:11:57 lr 0.000200 time 0.2546 (0.2575) loss 0.3515 (0.3582) grad_norm 299977.9688 (inf) mem 14543MB +[2023-10-13 02:40:54 simmim_pretrain](main_simmim.py 218): INFO Train: [150/200][4500/6787] eta 0:09:49 lr 0.000200 time 0.2569 (0.2576) loss 0.3840 (0.3583) grad_norm 225012.5156 (inf) mem 14543MB +[2023-10-13 02:43:03 simmim_pretrain](main_simmim.py 218): INFO Train: [150/200][5000/6787] eta 0:07:40 lr 0.000200 time 0.2543 (0.2575) loss 0.3767 (0.3583) 
grad_norm 239171.5312 (inf) mem 14543MB +[2023-10-13 02:45:11 simmim_pretrain](main_simmim.py 218): INFO Train: [150/200][5500/6787] eta 0:05:31 lr 0.000200 time 0.2592 (0.2575) loss 0.5483 (0.3655) grad_norm 1782.6455 (inf) mem 14543MB +[2023-10-13 02:47:20 simmim_pretrain](main_simmim.py 218): INFO Train: [150/200][6000/6787] eta 0:03:22 lr 0.000200 time 0.2516 (0.2575) loss 0.5035 (0.3772) grad_norm 20636.1270 (inf) mem 14543MB +[2023-10-13 02:49:28 simmim_pretrain](main_simmim.py 218): INFO Train: [150/200][6500/6787] eta 0:01:13 lr 0.000200 time 0.2605 (0.2574) loss 0.4623 (0.3849) grad_norm 25645.9219 (inf) mem 14543MB +[2023-10-13 02:50:42 simmim_pretrain](main_simmim.py 228): INFO EPOCH 150 training takes 0:29:07 +[2023-10-13 02:50:44 simmim_pretrain](main_simmim.py 218): INFO Train: [151/200][0/6787] eta 2:38:48 lr 0.000200 time 1.4040 (1.4040) loss 0.4102 (0.4102) grad_norm 13462.2559 (13462.2559) mem 14543MB +[2023-10-13 02:52:53 simmim_pretrain](main_simmim.py 218): INFO Train: [151/200][500/6787] eta 0:27:15 lr 0.000200 time 0.2570 (0.2601) loss 0.3867 (0.4030) grad_norm 17193.7891 (15487.4199) mem 14543MB +[2023-10-13 02:55:01 simmim_pretrain](main_simmim.py 218): INFO Train: [151/200][1000/6787] eta 0:24:55 lr 0.000200 time 0.2575 (0.2584) loss 0.3982 (0.3895) grad_norm 20401.3320 (18724.9551) mem 14543MB +[2023-10-13 02:57:09 simmim_pretrain](main_simmim.py 218): INFO Train: [151/200][1500/6787] eta 0:22:43 lr 0.000200 time 0.2523 (0.2578) loss 0.3558 (0.3831) grad_norm 21270.8145 (20845.8789) mem 14543MB +[2023-10-13 02:59:17 simmim_pretrain](main_simmim.py 218): INFO Train: [151/200][2000/6787] eta 0:20:32 lr 0.000200 time 0.2557 (0.2574) loss 0.3790 (0.3791) grad_norm 23940.1211 (21782.5605) mem 14543MB +[2023-10-13 03:01:26 simmim_pretrain](main_simmim.py 218): INFO Train: [151/200][2500/6787] eta 0:18:23 lr 0.000200 time 0.2536 (0.2574) loss 0.3604 (0.3767) grad_norm 81420.7109 (22379.1445) mem 14543MB +[2023-10-13 03:03:34 simmim_pretrain](main_simmim.py 218): INFO Train: [151/200][3000/6787] eta 0:16:14 lr 0.000200 time 0.2546 (0.2573) loss 0.3600 (0.3747) grad_norm 77059.1797 (24392.2520) mem 14543MB +[2023-10-13 03:05:43 simmim_pretrain](main_simmim.py 218): INFO Train: [151/200][3500/6787] eta 0:14:05 lr 0.000200 time 0.2585 (0.2573) loss 0.3695 (0.3729) grad_norm 44571.5820 (27122.8027) mem 14543MB +[2023-10-13 03:07:52 simmim_pretrain](main_simmim.py 218): INFO Train: [151/200][4000/6787] eta 0:11:57 lr 0.000200 time 0.2573 (0.2574) loss 0.3584 (0.3716) grad_norm 38739.6602 (28973.2324) mem 14543MB +[2023-10-13 03:10:00 simmim_pretrain](main_simmim.py 218): INFO Train: [151/200][4500/6787] eta 0:09:48 lr 0.000200 time 0.2578 (0.2573) loss 0.3468 (0.3705) grad_norm 62294.9141 (30826.6465) mem 14543MB +[2023-10-13 03:12:09 simmim_pretrain](main_simmim.py 218): INFO Train: [151/200][5000/6787] eta 0:07:39 lr 0.000200 time 0.2581 (0.2573) loss 0.3878 (0.3695) grad_norm 57446.9414 (34103.2031) mem 14543MB +[2023-10-13 03:14:17 simmim_pretrain](main_simmim.py 218): INFO Train: [151/200][5500/6787] eta 0:05:31 lr 0.000200 time 0.2605 (0.2572) loss 0.3756 (0.3688) grad_norm 106045.9844 (37038.3789) mem 14543MB +[2023-10-13 03:16:25 simmim_pretrain](main_simmim.py 218): INFO Train: [151/200][6000/6787] eta 0:03:22 lr 0.000200 time 0.2614 (0.2571) loss 0.3640 (0.3680) grad_norm 109342.7734 (40335.4336) mem 14543MB +[2023-10-13 03:18:35 simmim_pretrain](main_simmim.py 218): INFO Train: [151/200][6500/6787] eta 0:01:13 lr 0.000200 time 0.2597 (0.2573) loss 0.3543 (0.3674) 
grad_norm 205913.9531 (43840.9336) mem 14543MB +[2023-10-13 03:19:50 simmim_pretrain](main_simmim.py 228): INFO EPOCH 151 training takes 0:29:07 +[2023-10-13 03:19:52 simmim_pretrain](main_simmim.py 218): INFO Train: [152/200][0/6787] eta 2:46:33 lr 0.000200 time 1.4724 (1.4724) loss 0.3513 (0.3513) grad_norm 111140.8203 (111140.8203) mem 14543MB +[2023-10-13 03:21:58 simmim_pretrain](main_simmim.py 218): INFO Train: [152/200][500/6787] eta 0:26:38 lr 0.000200 time 0.2522 (0.2542) loss 0.3485 (0.3579) grad_norm 129025.0234 (118477.9375) mem 14543MB +[2023-10-13 03:24:04 simmim_pretrain](main_simmim.py 218): INFO Train: [152/200][1000/6787] eta 0:24:26 lr 0.000200 time 0.2592 (0.2534) loss 0.3452 (0.3585) grad_norm 194501.3438 (126527.5391) mem 14543MB +[2023-10-13 03:26:10 simmim_pretrain](main_simmim.py 218): INFO Train: [152/200][1500/6787] eta 0:22:17 lr 0.000200 time 0.2587 (0.2529) loss 0.3579 (0.3590) grad_norm 118416.9219 (135834.5625) mem 14543MB +[2023-10-13 03:28:16 simmim_pretrain](main_simmim.py 218): INFO Train: [152/200][2000/6787] eta 0:20:10 lr 0.000200 time 0.2530 (0.2528) loss 0.3569 (0.3588) grad_norm 369486.2500 (149380.3906) mem 14543MB +[2023-10-13 03:30:23 simmim_pretrain](main_simmim.py 218): INFO Train: [152/200][2500/6787] eta 0:18:03 lr 0.000200 time 0.2524 (0.2528) loss 0.3456 (0.3586) grad_norm 197205.8125 (164560.0000) mem 14543MB +[2023-10-13 03:32:30 simmim_pretrain](main_simmim.py 218): INFO Train: [152/200][3000/6787] eta 0:15:58 lr 0.000200 time 0.2481 (0.2531) loss 0.3715 (0.3584) grad_norm 286720.9688 (201639.7812) mem 14543MB +[2023-10-13 03:34:37 simmim_pretrain](main_simmim.py 218): INFO Train: [152/200][3500/6787] eta 0:13:52 lr 0.000200 time 0.2549 (0.2533) loss 0.3885 (0.3583) grad_norm 218390.5781 (inf) mem 14543MB +[2023-10-13 03:36:45 simmim_pretrain](main_simmim.py 218): INFO Train: [152/200][4000/6787] eta 0:11:46 lr 0.000200 time 0.2547 (0.2536) loss 0.3567 (0.3584) grad_norm 144553.0000 (inf) mem 14543MB +[2023-10-13 03:38:53 simmim_pretrain](main_simmim.py 218): INFO Train: [152/200][4500/6787] eta 0:09:40 lr 0.000200 time 0.2543 (0.2538) loss 0.3598 (0.3584) grad_norm 217747.8594 (inf) mem 14543MB +[2023-10-13 03:41:01 simmim_pretrain](main_simmim.py 218): INFO Train: [152/200][5000/6787] eta 0:07:34 lr 0.000200 time 0.2557 (0.2541) loss 0.3733 (0.3582) grad_norm 178488.0469 (inf) mem 14543MB +[2023-10-13 03:43:09 simmim_pretrain](main_simmim.py 218): INFO Train: [152/200][5500/6787] eta 0:05:27 lr 0.000200 time 0.2609 (0.2543) loss 0.3376 (0.3584) grad_norm 152598.9375 (inf) mem 14543MB +[2023-10-13 03:45:18 simmim_pretrain](main_simmim.py 218): INFO Train: [152/200][6000/6787] eta 0:03:20 lr 0.000200 time 0.2548 (0.2547) loss 0.3764 (0.3586) grad_norm 88212.9766 (inf) mem 14543MB +[2023-10-13 03:47:27 simmim_pretrain](main_simmim.py 218): INFO Train: [152/200][6500/6787] eta 0:01:13 lr 0.000200 time 0.2516 (0.2548) loss 0.3510 (0.3587) grad_norm 114580.9141 (inf) mem 14543MB +[2023-10-13 03:48:40 simmim_pretrain](main_simmim.py 228): INFO EPOCH 152 training takes 0:28:50 +[2023-10-13 03:48:42 simmim_pretrain](main_simmim.py 218): INFO Train: [153/200][0/6787] eta 2:53:06 lr 0.000200 time 1.5303 (1.5303) loss 0.3431 (0.3431) grad_norm 88908.8828 (88908.8828) mem 14543MB +[2023-10-13 03:50:50 simmim_pretrain](main_simmim.py 218): INFO Train: [153/200][500/6787] eta 0:26:59 lr 0.000200 time 0.2538 (0.2575) loss 0.3524 (0.3590) grad_norm 71872.1328 (104846.9062) mem 14543MB +[2023-10-13 03:52:58 simmim_pretrain](main_simmim.py 218): INFO 
Train: [153/200][1000/6787] eta 0:24:51 lr 0.000200 time 0.2616 (0.2577) loss 0.3828 (0.3587) grad_norm 171802.3281 (116874.8906) mem 14543MB +[2023-10-13 03:55:09 simmim_pretrain](main_simmim.py 218): INFO Train: [153/200][1500/6787] eta 0:22:46 lr 0.000200 time 0.2613 (0.2585) loss 0.3540 (0.3584) grad_norm 376390.4062 (128587.7812) mem 14543MB +[2023-10-13 03:57:19 simmim_pretrain](main_simmim.py 218): INFO Train: [153/200][2000/6787] eta 0:20:40 lr 0.000200 time 0.2547 (0.2590) loss 0.3691 (0.3585) grad_norm 193374.7656 (147317.0312) mem 14543MB +[2023-10-13 03:59:27 simmim_pretrain](main_simmim.py 218): INFO Train: [153/200][2500/6787] eta 0:18:28 lr 0.000200 time 0.2567 (0.2586) loss 0.3634 (0.3583) grad_norm 145557.3906 (154220.4531) mem 14543MB +[2023-10-13 04:01:35 simmim_pretrain](main_simmim.py 218): INFO Train: [153/200][3000/6787] eta 0:16:17 lr 0.000200 time 0.2565 (0.2582) loss 0.3662 (0.3579) grad_norm 259877.6094 (176576.6875) mem 14543MB +[2023-10-13 04:03:45 simmim_pretrain](main_simmim.py 218): INFO Train: [153/200][3500/6787] eta 0:14:08 lr 0.000200 time 0.2606 (0.2582) loss 0.3922 (0.3578) grad_norm 456254.2500 (187944.7031) mem 14543MB +[2023-10-13 04:05:53 simmim_pretrain](main_simmim.py 218): INFO Train: [153/200][4000/6787] eta 0:11:59 lr 0.000200 time 0.2536 (0.2581) loss 0.3480 (0.3576) grad_norm 393385.7500 (200468.7344) mem 14543MB +[2023-10-13 04:08:01 simmim_pretrain](main_simmim.py 218): INFO Train: [153/200][4500/6787] eta 0:09:49 lr 0.000200 time 0.2591 (0.2577) loss 0.3633 (0.3575) grad_norm 300445.6875 (218728.2188) mem 14543MB +[2023-10-13 04:10:09 simmim_pretrain](main_simmim.py 218): INFO Train: [153/200][5000/6787] eta 0:07:40 lr 0.000200 time 0.2532 (0.2576) loss 0.3641 (0.3574) grad_norm 391887.3750 (243255.8750) mem 14543MB +[2023-10-13 04:12:17 simmim_pretrain](main_simmim.py 218): INFO Train: [153/200][5500/6787] eta 0:05:31 lr 0.000200 time 0.2528 (0.2574) loss 0.3732 (0.3573) grad_norm 394974.6250 (inf) mem 14543MB +[2023-10-13 04:14:24 simmim_pretrain](main_simmim.py 218): INFO Train: [153/200][6000/6787] eta 0:03:22 lr 0.000200 time 0.2462 (0.2572) loss 0.3477 (0.3572) grad_norm 362436.6562 (inf) mem 14543MB +[2023-10-13 04:16:31 simmim_pretrain](main_simmim.py 218): INFO Train: [153/200][6500/6787] eta 0:01:13 lr 0.000200 time 0.2526 (0.2570) loss 0.3289 (0.3572) grad_norm 457967.6875 (inf) mem 14543MB +[2023-10-13 04:17:45 simmim_pretrain](main_simmim.py 228): INFO EPOCH 153 training takes 0:29:04 +[2023-10-13 04:17:47 simmim_pretrain](main_simmim.py 218): INFO Train: [154/200][0/6787] eta 2:49:52 lr 0.000200 time 1.5018 (1.5018) loss 0.3556 (0.3556) grad_norm 351122.5000 (351122.5000) mem 14543MB +[2023-10-13 04:19:53 simmim_pretrain](main_simmim.py 218): INFO Train: [154/200][500/6787] eta 0:26:46 lr 0.000200 time 0.2496 (0.2556) loss 0.3384 (0.3568) grad_norm 426157.5312 (365153.0938) mem 14543MB +[2023-10-13 04:22:00 simmim_pretrain](main_simmim.py 218): INFO Train: [154/200][1000/6787] eta 0:24:32 lr 0.000200 time 0.2513 (0.2545) loss 0.3527 (0.3566) grad_norm 462092.5312 (inf) mem 14543MB +[2023-10-13 04:24:07 simmim_pretrain](main_simmim.py 218): INFO Train: [154/200][1500/6787] eta 0:22:24 lr 0.000200 time 0.2565 (0.2543) loss 0.3599 (0.3566) grad_norm 362926.0625 (inf) mem 14543MB +[2023-10-13 04:26:13 simmim_pretrain](main_simmim.py 218): INFO Train: [154/200][2000/6787] eta 0:20:15 lr 0.000200 time 0.2546 (0.2540) loss 0.3437 (0.3565) grad_norm 385189.7500 (inf) mem 14543MB +[2023-10-13 04:28:20 simmim_pretrain](main_simmim.py 
218): INFO Train: [154/200][2500/6787] eta 0:18:08 lr 0.000200 time 0.2525 (0.2539) loss 0.3511 (0.3565) grad_norm 380030.4375 (inf) mem 14543MB +[2023-10-13 04:30:27 simmim_pretrain](main_simmim.py 218): INFO Train: [154/200][3000/6787] eta 0:16:01 lr 0.000200 time 0.2529 (0.2538) loss 0.3349 (0.3565) grad_norm 497230.6562 (inf) mem 14543MB +[2023-10-13 04:32:34 simmim_pretrain](main_simmim.py 218): INFO Train: [154/200][3500/6787] eta 0:13:54 lr 0.000200 time 0.2541 (0.2538) loss 0.3668 (0.3564) grad_norm 478590.4688 (inf) mem 14543MB +[2023-10-13 04:34:40 simmim_pretrain](main_simmim.py 218): INFO Train: [154/200][4000/6787] eta 0:11:47 lr 0.000200 time 0.2537 (0.2537) loss 0.3513 (0.3563) grad_norm 353974.0000 (inf) mem 14543MB +[2023-10-13 04:36:47 simmim_pretrain](main_simmim.py 218): INFO Train: [154/200][4500/6787] eta 0:09:40 lr 0.000200 time 0.2524 (0.2538) loss 0.3305 (0.3562) grad_norm 280012.6250 (inf) mem 14543MB +[2023-10-13 04:38:54 simmim_pretrain](main_simmim.py 218): INFO Train: [154/200][5000/6787] eta 0:07:33 lr 0.000200 time 0.2522 (0.2538) loss 0.3422 (0.3562) grad_norm 510298.7812 (inf) mem 14543MB +[2023-10-13 04:41:01 simmim_pretrain](main_simmim.py 218): INFO Train: [154/200][5500/6787] eta 0:05:26 lr 0.000200 time 0.2537 (0.2538) loss 0.3462 (0.3562) grad_norm 221191.6406 (inf) mem 14543MB +[2023-10-13 04:43:07 simmim_pretrain](main_simmim.py 218): INFO Train: [154/200][6000/6787] eta 0:03:19 lr 0.000200 time 0.2525 (0.2536) loss 0.3580 (0.3563) grad_norm 277066.1562 (inf) mem 14543MB +[2023-10-13 04:45:13 simmim_pretrain](main_simmim.py 218): INFO Train: [154/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2513 (0.2535) loss 0.3458 (0.3563) grad_norm 185827.8125 (inf) mem 14543MB +[2023-10-13 04:46:26 simmim_pretrain](main_simmim.py 228): INFO EPOCH 154 training takes 0:28:40 +[2023-10-13 04:46:27 simmim_pretrain](main_simmim.py 218): INFO Train: [155/200][0/6787] eta 2:58:16 lr 0.000200 time 1.5760 (1.5760) loss 0.3474 (0.3474) grad_norm 361899.0000 (361899.0000) mem 14543MB +[2023-10-13 04:48:33 simmim_pretrain](main_simmim.py 218): INFO Train: [155/200][500/6787] eta 0:26:33 lr 0.000200 time 0.2528 (0.2535) loss 0.3478 (0.3581) grad_norm 240292.4531 (inf) mem 14543MB +[2023-10-13 04:50:38 simmim_pretrain](main_simmim.py 218): INFO Train: [155/200][1000/6787] eta 0:24:19 lr 0.000200 time 0.2524 (0.2522) loss 0.3553 (0.3577) grad_norm 197346.3281 (inf) mem 14543MB +[2023-10-13 04:52:44 simmim_pretrain](main_simmim.py 218): INFO Train: [155/200][1500/6787] eta 0:22:13 lr 0.000200 time 0.2583 (0.2522) loss 0.3633 (0.3579) grad_norm 257376.0469 (inf) mem 14543MB +[2023-10-13 04:54:51 simmim_pretrain](main_simmim.py 218): INFO Train: [155/200][2000/6787] eta 0:20:09 lr 0.000200 time 0.2579 (0.2526) loss 0.3523 (0.3580) grad_norm 124071.3906 (inf) mem 14543MB +[2023-10-13 04:56:59 simmim_pretrain](main_simmim.py 218): INFO Train: [155/200][2500/6787] eta 0:18:05 lr 0.000200 time 0.2612 (0.2532) loss 0.3725 (0.3578) grad_norm 335331.0938 (inf) mem 14543MB +[2023-10-13 04:59:08 simmim_pretrain](main_simmim.py 218): INFO Train: [155/200][3000/6787] eta 0:16:01 lr 0.000200 time 0.2609 (0.2539) loss 0.3728 (0.3579) grad_norm 255552.9844 (inf) mem 14543MB +[2023-10-13 05:01:16 simmim_pretrain](main_simmim.py 218): INFO Train: [155/200][3500/6787] eta 0:13:56 lr 0.000200 time 0.2471 (0.2544) loss 0.3641 (0.3577) grad_norm 201499.5625 (inf) mem 14543MB +[2023-10-13 05:03:25 simmim_pretrain](main_simmim.py 218): INFO Train: [155/200][4000/6787] eta 0:11:49 lr 0.000200 time 
0.2567 (0.2547) loss 0.3288 (0.3577) grad_norm 186453.6406 (inf) mem 14543MB +[2023-10-13 05:05:33 simmim_pretrain](main_simmim.py 218): INFO Train: [155/200][4500/6787] eta 0:09:42 lr 0.000200 time 0.2533 (0.2549) loss 0.3303 (0.3576) grad_norm 213723.7969 (inf) mem 14543MB +[2023-10-13 05:07:42 simmim_pretrain](main_simmim.py 218): INFO Train: [155/200][5000/6787] eta 0:07:36 lr 0.000200 time 0.2602 (0.2553) loss 0.3532 (0.3577) grad_norm 192604.1875 (inf) mem 14543MB +[2023-10-13 05:09:52 simmim_pretrain](main_simmim.py 218): INFO Train: [155/200][5500/6787] eta 0:05:29 lr 0.000200 time 0.2603 (0.2557) loss 0.3498 (0.3576) grad_norm 272069.6562 (inf) mem 14543MB +[2023-10-13 05:12:01 simmim_pretrain](main_simmim.py 218): INFO Train: [155/200][6000/6787] eta 0:03:21 lr 0.000200 time 0.2572 (0.2558) loss 0.3418 (0.3576) grad_norm 310849.5000 (inf) mem 14543MB +[2023-10-13 05:14:10 simmim_pretrain](main_simmim.py 218): INFO Train: [155/200][6500/6787] eta 0:01:13 lr 0.000200 time 0.2596 (0.2560) loss 0.3525 (0.3577) grad_norm 389106.2188 (inf) mem 14543MB +[2023-10-13 05:15:24 simmim_pretrain](main_simmim.py 228): INFO EPOCH 155 training takes 0:28:58 +[2023-10-13 05:15:25 simmim_pretrain](main_simmim.py 218): INFO Train: [156/200][0/6787] eta 2:44:36 lr 0.000200 time 1.4553 (1.4553) loss 0.3663 (0.3663) grad_norm 371849.5000 (371849.5000) mem 14543MB +[2023-10-13 05:17:34 simmim_pretrain](main_simmim.py 218): INFO Train: [156/200][500/6787] eta 0:27:13 lr 0.000200 time 0.2593 (0.2598) loss 0.3414 (0.3566) grad_norm 493985.0938 (377549.1875) mem 14543MB +[2023-10-13 05:19:44 simmim_pretrain](main_simmim.py 218): INFO Train: [156/200][1000/6787] eta 0:25:05 lr 0.000200 time 0.2611 (0.2601) loss 0.3656 (0.3564) grad_norm 274734.3750 (inf) mem 14543MB +[2023-10-13 05:21:54 simmim_pretrain](main_simmim.py 218): INFO Train: [156/200][1500/6787] eta 0:22:55 lr 0.000200 time 0.2573 (0.2602) loss 0.3597 (0.3565) grad_norm 360929.5938 (inf) mem 14543MB +[2023-10-13 05:24:04 simmim_pretrain](main_simmim.py 218): INFO Train: [156/200][2000/6787] eta 0:20:45 lr 0.000200 time 0.2590 (0.2602) loss 0.3390 (0.3563) grad_norm 528151.4375 (inf) mem 14543MB +[2023-10-13 05:26:15 simmim_pretrain](main_simmim.py 218): INFO Train: [156/200][2500/6787] eta 0:18:35 lr 0.000200 time 0.2596 (0.2603) loss 0.3266 (0.3563) grad_norm 295734.4688 (inf) mem 14543MB +[2023-10-13 05:28:25 simmim_pretrain](main_simmim.py 218): INFO Train: [156/200][3000/6787] eta 0:16:25 lr 0.000200 time 0.2611 (0.2604) loss 0.3476 (0.3562) grad_norm 489478.5625 (inf) mem 14543MB +[2023-10-13 05:30:36 simmim_pretrain](main_simmim.py 218): INFO Train: [156/200][3500/6787] eta 0:14:15 lr 0.000200 time 0.2611 (0.2604) loss 0.3662 (0.3563) grad_norm 518558.8750 (inf) mem 14543MB +[2023-10-13 05:32:46 simmim_pretrain](main_simmim.py 218): INFO Train: [156/200][4000/6787] eta 0:12:05 lr 0.000200 time 0.2607 (0.2604) loss 0.3533 (0.3565) grad_norm 171952.5000 (inf) mem 14543MB +[2023-10-13 05:34:56 simmim_pretrain](main_simmim.py 218): INFO Train: [156/200][4500/6787] eta 0:09:55 lr 0.000200 time 0.2608 (0.2605) loss 0.3420 (0.3567) grad_norm 240473.8281 (inf) mem 14543MB +[2023-10-13 05:37:07 simmim_pretrain](main_simmim.py 218): INFO Train: [156/200][5000/6787] eta 0:07:45 lr 0.000200 time 0.2606 (0.2605) loss 0.3475 (0.3570) grad_norm 279045.9375 (inf) mem 14543MB +[2023-10-13 05:39:17 simmim_pretrain](main_simmim.py 218): INFO Train: [156/200][5500/6787] eta 0:05:35 lr 0.000200 time 0.2611 (0.2605) loss 0.3566 (0.3572) grad_norm 171222.9531 
(inf) mem 14543MB +[2023-10-13 05:41:27 simmim_pretrain](main_simmim.py 218): INFO Train: [156/200][6000/6787] eta 0:03:25 lr 0.000200 time 0.2607 (0.2605) loss 0.3543 (0.3571) grad_norm 490550.9062 (inf) mem 14543MB +[2023-10-13 05:43:38 simmim_pretrain](main_simmim.py 218): INFO Train: [156/200][6500/6787] eta 0:01:14 lr 0.000200 time 0.2611 (0.2605) loss 0.3655 (0.3571) grad_norm 328912.0625 (inf) mem 14543MB +[2023-10-13 05:44:53 simmim_pretrain](main_simmim.py 228): INFO EPOCH 156 training takes 0:29:29 +[2023-10-13 05:44:54 simmim_pretrain](main_simmim.py 218): INFO Train: [157/200][0/6787] eta 2:42:18 lr 0.000200 time 1.4348 (1.4348) loss 0.3433 (0.3433) grad_norm 537741.4375 (537741.4375) mem 14543MB +[2023-10-13 05:47:02 simmim_pretrain](main_simmim.py 218): INFO Train: [157/200][500/6787] eta 0:26:59 lr 0.000200 time 0.2589 (0.2576) loss 0.3615 (0.3575) grad_norm 281209.5312 (inf) mem 14543MB +[2023-10-13 05:49:10 simmim_pretrain](main_simmim.py 218): INFO Train: [157/200][1000/6787] eta 0:24:47 lr 0.000200 time 0.2545 (0.2571) loss 0.3620 (0.3593) grad_norm 114405.8750 (inf) mem 14543MB +[2023-10-13 05:51:20 simmim_pretrain](main_simmim.py 218): INFO Train: [157/200][1500/6787] eta 0:22:44 lr 0.000200 time 0.2612 (0.2580) loss 0.3707 (0.3593) grad_norm 122775.2891 (inf) mem 14543MB +[2023-10-13 05:53:30 simmim_pretrain](main_simmim.py 218): INFO Train: [157/200][2000/6787] eta 0:20:37 lr 0.000200 time 0.2613 (0.2586) loss 0.3496 (0.3594) grad_norm 175143.0469 (inf) mem 14543MB +[2023-10-13 05:55:40 simmim_pretrain](main_simmim.py 218): INFO Train: [157/200][2500/6787] eta 0:18:30 lr 0.000200 time 0.2610 (0.2589) loss 0.3827 (0.3598) grad_norm 243360.6562 (inf) mem 14543MB +[2023-10-13 05:57:50 simmim_pretrain](main_simmim.py 218): INFO Train: [157/200][3000/6787] eta 0:16:21 lr 0.000200 time 0.2608 (0.2591) loss 0.3679 (0.3598) grad_norm 299998.3750 (inf) mem 14543MB +[2023-10-13 06:00:00 simmim_pretrain](main_simmim.py 218): INFO Train: [157/200][3500/6787] eta 0:14:12 lr 0.000200 time 0.2605 (0.2592) loss 0.3591 (0.3596) grad_norm 209698.4531 (inf) mem 14543MB +[2023-10-13 06:02:10 simmim_pretrain](main_simmim.py 218): INFO Train: [157/200][4000/6787] eta 0:12:02 lr 0.000200 time 0.2605 (0.2593) loss 0.3815 (0.3595) grad_norm 160190.5156 (inf) mem 14543MB +[2023-10-13 06:04:20 simmim_pretrain](main_simmim.py 218): INFO Train: [157/200][4500/6787] eta 0:09:53 lr 0.000200 time 0.2566 (0.2593) loss 0.3420 (0.3594) grad_norm 474693.3438 (inf) mem 14543MB +[2023-10-13 06:06:30 simmim_pretrain](main_simmim.py 218): INFO Train: [157/200][5000/6787] eta 0:07:43 lr 0.000200 time 0.2596 (0.2593) loss 0.3503 (0.3592) grad_norm 365059.6250 (inf) mem 14543MB +[2023-10-13 06:08:38 simmim_pretrain](main_simmim.py 218): INFO Train: [157/200][5500/6787] eta 0:05:33 lr 0.000200 time 0.2577 (0.2591) loss 0.3428 (0.3589) grad_norm 563106.5000 (inf) mem 14543MB +[2023-10-13 06:10:46 simmim_pretrain](main_simmim.py 218): INFO Train: [157/200][6000/6787] eta 0:03:23 lr 0.000200 time 0.2567 (0.2589) loss 0.3734 (0.3587) grad_norm 256868.7812 (inf) mem 14543MB +[2023-10-13 06:12:54 simmim_pretrain](main_simmim.py 218): INFO Train: [157/200][6500/6787] eta 0:01:14 lr 0.000200 time 0.2545 (0.2586) loss 0.3443 (0.3586) grad_norm 296856.0938 (inf) mem 14543MB +[2023-10-13 06:14:08 simmim_pretrain](main_simmim.py 228): INFO EPOCH 157 training takes 0:29:14 +[2023-10-13 06:14:09 simmim_pretrain](main_simmim.py 218): INFO Train: [158/200][0/6787] eta 2:45:09 lr 0.000200 time 1.4601 (1.4601) loss 0.3510 
(0.3510) grad_norm 258717.1719 (258717.1719) mem 14543MB +[2023-10-13 06:16:14 simmim_pretrain](main_simmim.py 218): INFO Train: [158/200][500/6787] eta 0:26:22 lr 0.000200 time 0.2462 (0.2517) loss 0.3545 (0.3582) grad_norm 230107.0312 (233489.0312) mem 14543MB +[2023-10-13 06:18:19 simmim_pretrain](main_simmim.py 218): INFO Train: [158/200][1000/6787] eta 0:24:10 lr 0.000200 time 0.2463 (0.2507) loss 0.3629 (0.3583) grad_norm 276626.1250 (231824.6406) mem 14543MB +[2023-10-13 06:20:24 simmim_pretrain](main_simmim.py 218): INFO Train: [158/200][1500/6787] eta 0:22:04 lr 0.000200 time 0.2483 (0.2505) loss 0.3516 (0.3582) grad_norm 495431.4688 (238631.6094) mem 14543MB +[2023-10-13 06:22:29 simmim_pretrain](main_simmim.py 218): INFO Train: [158/200][2000/6787] eta 0:19:57 lr 0.000200 time 0.2530 (0.2503) loss 0.3694 (0.3580) grad_norm 418535.8125 (263063.8125) mem 14543MB +[2023-10-13 06:24:33 simmim_pretrain](main_simmim.py 218): INFO Train: [158/200][2500/6787] eta 0:17:52 lr 0.000200 time 0.2491 (0.2502) loss 0.3729 (0.3578) grad_norm 301516.2812 (inf) mem 14543MB +[2023-10-13 06:26:38 simmim_pretrain](main_simmim.py 218): INFO Train: [158/200][3000/6787] eta 0:15:47 lr 0.000200 time 0.2482 (0.2501) loss 0.3497 (0.3578) grad_norm 221309.6719 (inf) mem 14543MB +[2023-10-13 06:28:43 simmim_pretrain](main_simmim.py 218): INFO Train: [158/200][3500/6787] eta 0:13:41 lr 0.000200 time 0.2473 (0.2500) loss 0.3421 (0.3578) grad_norm 144594.2500 (inf) mem 14543MB +[2023-10-13 06:30:48 simmim_pretrain](main_simmim.py 218): INFO Train: [158/200][4000/6787] eta 0:11:36 lr 0.000200 time 0.2587 (0.2499) loss 0.3508 (0.3579) grad_norm 203066.7812 (inf) mem 14543MB +[2023-10-13 06:32:52 simmim_pretrain](main_simmim.py 218): INFO Train: [158/200][4500/6787] eta 0:09:31 lr 0.000200 time 0.2479 (0.2499) loss 0.3603 (0.3580) grad_norm 316892.8750 (inf) mem 14543MB +[2023-10-13 06:34:58 simmim_pretrain](main_simmim.py 218): INFO Train: [158/200][5000/6787] eta 0:07:26 lr 0.000200 time 0.2515 (0.2500) loss 0.3322 (0.3578) grad_norm 346415.8750 (inf) mem 14543MB +[2023-10-13 06:37:04 simmim_pretrain](main_simmim.py 218): INFO Train: [158/200][5500/6787] eta 0:05:22 lr 0.000200 time 0.2562 (0.2502) loss 0.3629 (0.3578) grad_norm 288676.2812 (inf) mem 14543MB +[2023-10-13 06:39:10 simmim_pretrain](main_simmim.py 218): INFO Train: [158/200][6000/6787] eta 0:03:17 lr 0.000200 time 0.2476 (0.2504) loss 0.3564 (0.3578) grad_norm 501229.6562 (inf) mem 14543MB +[2023-10-13 06:41:14 simmim_pretrain](main_simmim.py 218): INFO Train: [158/200][6500/6787] eta 0:01:11 lr 0.000200 time 0.2494 (0.2502) loss 0.3415 (0.3576) grad_norm 917671.5000 (inf) mem 14543MB +[2023-10-13 06:42:25 simmim_pretrain](main_simmim.py 228): INFO EPOCH 158 training takes 0:28:17 +[2023-10-13 06:42:27 simmim_pretrain](main_simmim.py 218): INFO Train: [159/200][0/6787] eta 2:37:10 lr 0.000200 time 1.3895 (1.3895) loss 0.3457 (0.3457) grad_norm 353975.7188 (353975.7188) mem 14543MB +[2023-10-13 06:44:30 simmim_pretrain](main_simmim.py 218): INFO Train: [159/200][500/6787] eta 0:25:59 lr 0.000200 time 0.2452 (0.2480) loss 0.3496 (0.3549) grad_norm 606030.5625 (inf) mem 14543MB +[2023-10-13 06:46:32 simmim_pretrain](main_simmim.py 218): INFO Train: [159/200][1000/6787] eta 0:23:47 lr 0.000200 time 0.2444 (0.2466) loss 0.3409 (0.3557) grad_norm 476927.6250 (inf) mem 14543MB +[2023-10-13 06:48:34 simmim_pretrain](main_simmim.py 218): INFO Train: [159/200][1500/6787] eta 0:21:39 lr 0.000200 time 0.2444 (0.2458) loss 0.3594 (0.3563) grad_norm 431038.6250 
(inf) mem 14543MB +[2023-10-13 06:50:36 simmim_pretrain](main_simmim.py 218): INFO Train: [159/200][2000/6787] eta 0:19:34 lr 0.000200 time 0.2436 (0.2453) loss 0.3656 (0.3560) grad_norm 372077.4062 (inf) mem 14543MB +[2023-10-13 06:52:38 simmim_pretrain](main_simmim.py 218): INFO Train: [159/200][2500/6787] eta 0:17:30 lr 0.000200 time 0.2433 (0.2450) loss 0.3620 (0.3561) grad_norm 426754.4062 (inf) mem 14543MB +[2023-10-13 06:54:40 simmim_pretrain](main_simmim.py 218): INFO Train: [159/200][3000/6787] eta 0:15:27 lr 0.000200 time 0.2439 (0.2448) loss 0.3565 (0.3563) grad_norm 265930.3438 (inf) mem 14543MB +[2023-10-13 06:56:42 simmim_pretrain](main_simmim.py 218): INFO Train: [159/200][3500/6787] eta 0:13:24 lr 0.000200 time 0.2435 (0.2447) loss 0.3607 (0.3571) grad_norm 153865.7969 (inf) mem 14543MB +[2023-10-13 06:58:44 simmim_pretrain](main_simmim.py 218): INFO Train: [159/200][4000/6787] eta 0:11:21 lr 0.000200 time 0.2438 (0.2445) loss 0.3483 (0.3578) grad_norm 129962.8906 (inf) mem 14543MB +[2023-10-13 07:00:46 simmim_pretrain](main_simmim.py 218): INFO Train: [159/200][4500/6787] eta 0:09:19 lr 0.000200 time 0.2437 (0.2444) loss 0.3758 (0.3581) grad_norm 124392.2969 (inf) mem 14543MB +[2023-10-13 07:02:48 simmim_pretrain](main_simmim.py 218): INFO Train: [159/200][5000/6787] eta 0:07:16 lr 0.000200 time 0.2437 (0.2444) loss 0.3437 (0.3584) grad_norm 189971.3438 (inf) mem 14543MB +[2023-10-13 07:04:49 simmim_pretrain](main_simmim.py 218): INFO Train: [159/200][5500/6787] eta 0:05:14 lr 0.000200 time 0.2437 (0.2443) loss 0.3691 (0.3585) grad_norm 146737.9531 (inf) mem 14543MB +[2023-10-13 07:06:51 simmim_pretrain](main_simmim.py 218): INFO Train: [159/200][6000/6787] eta 0:03:12 lr 0.000200 time 0.2437 (0.2443) loss 0.3781 (0.3585) grad_norm 172779.5312 (inf) mem 14543MB +[2023-10-13 07:08:53 simmim_pretrain](main_simmim.py 218): INFO Train: [159/200][6500/6787] eta 0:01:10 lr 0.000200 time 0.2436 (0.2442) loss 0.3606 (0.3585) grad_norm 181695.5312 (inf) mem 14543MB +[2023-10-13 07:10:04 simmim_pretrain](main_simmim.py 228): INFO EPOCH 159 training takes 0:27:38 +[2023-10-13 07:10:05 simmim_pretrain](main_simmim.py 218): INFO Train: [160/200][0/6787] eta 2:19:12 lr 0.000200 time 1.2306 (1.2306) loss 0.3481 (0.3481) grad_norm 102236.2812 (102236.2812) mem 14543MB +[2023-10-13 07:12:07 simmim_pretrain](main_simmim.py 218): INFO Train: [160/200][500/6787] eta 0:25:45 lr 0.000200 time 0.2439 (0.2458) loss 0.3740 (0.3583) grad_norm 174118.9844 (227118.1719) mem 14543MB +[2023-10-13 07:14:09 simmim_pretrain](main_simmim.py 218): INFO Train: [160/200][1000/6787] eta 0:23:36 lr 0.000200 time 0.2440 (0.2448) loss 0.3265 (0.3574) grad_norm 275769.4688 (253380.7500) mem 14543MB +[2023-10-13 07:16:11 simmim_pretrain](main_simmim.py 218): INFO Train: [160/200][1500/6787] eta 0:21:33 lr 0.000200 time 0.2441 (0.2446) loss 0.3455 (0.3571) grad_norm 333898.8125 (283318.9375) mem 14543MB +[2023-10-13 07:18:13 simmim_pretrain](main_simmim.py 218): INFO Train: [160/200][2000/6787] eta 0:19:30 lr 0.000200 time 0.2441 (0.2445) loss 0.3657 (0.3569) grad_norm 216874.9844 (inf) mem 14543MB +[2023-10-13 07:20:15 simmim_pretrain](main_simmim.py 218): INFO Train: [160/200][2500/6787] eta 0:17:27 lr 0.000200 time 0.2441 (0.2444) loss 0.3663 (0.3571) grad_norm 266075.2188 (inf) mem 14543MB +[2023-10-13 07:22:17 simmim_pretrain](main_simmim.py 218): INFO Train: [160/200][3000/6787] eta 0:15:25 lr 0.000200 time 0.2437 (0.2444) loss 0.3544 (0.3571) grad_norm 323675.9062 (inf) mem 14543MB +[2023-10-13 07:24:19 
simmim_pretrain](main_simmim.py 218): INFO Train: [160/200][3500/6787] eta 0:13:23 lr 0.000200 time 0.2443 (0.2444) loss 0.3519 (0.3573) grad_norm 205161.0938 (inf) mem 14543MB +[2023-10-13 07:26:21 simmim_pretrain](main_simmim.py 218): INFO Train: [160/200][4000/6787] eta 0:11:20 lr 0.000200 time 0.2437 (0.2443) loss 0.3420 (0.3573) grad_norm 301832.3750 (inf) mem 14543MB +[2023-10-13 07:28:23 simmim_pretrain](main_simmim.py 218): INFO Train: [160/200][4500/6787] eta 0:09:18 lr 0.000200 time 0.2436 (0.2443) loss 0.3832 (0.3572) grad_norm 166255.3906 (inf) mem 14543MB +[2023-10-13 07:30:25 simmim_pretrain](main_simmim.py 218): INFO Train: [160/200][5000/6787] eta 0:07:16 lr 0.000200 time 0.2437 (0.2442) loss 0.3417 (0.3572) grad_norm 303188.2188 (inf) mem 14543MB +[2023-10-13 07:32:27 simmim_pretrain](main_simmim.py 218): INFO Train: [160/200][5500/6787] eta 0:05:14 lr 0.000200 time 0.2436 (0.2442) loss 0.3745 (0.3571) grad_norm 880668.4375 (inf) mem 14543MB +[2023-10-13 07:34:29 simmim_pretrain](main_simmim.py 218): INFO Train: [160/200][6000/6787] eta 0:03:12 lr 0.000200 time 0.2436 (0.2441) loss 0.3636 (0.3570) grad_norm 612333.3750 (inf) mem 14543MB +[2023-10-13 07:36:31 simmim_pretrain](main_simmim.py 218): INFO Train: [160/200][6500/6787] eta 0:01:10 lr 0.000200 time 0.2433 (0.2441) loss 0.3579 (0.3569) grad_norm 339250.9688 (inf) mem 14543MB +[2023-10-13 07:37:41 simmim_pretrain](main_simmim.py 228): INFO EPOCH 160 training takes 0:27:37 +[2023-10-13 07:37:41 simmim_pretrain](utils.py 62): INFO /root/autodl-tmp/LSQ-simmim/checkpoint/simmim_pretrain/vit_simmim/ckpt_epoch_160.pth saving...... +[2023-10-13 07:37:42 simmim_pretrain](utils.py 64): INFO /root/autodl-tmp/LSQ-simmim/checkpoint/simmim_pretrain/vit_simmim/ckpt_epoch_160.pth saved !!! +[2023-10-13 07:37:43 simmim_pretrain](main_simmim.py 218): INFO Train: [161/200][0/6787] eta 2:36:56 lr 0.000200 time 1.3875 (1.3875) loss 0.3617 (0.3617) grad_norm 220175.4844 (220175.4844) mem 14543MB +[2023-10-13 07:39:45 simmim_pretrain](main_simmim.py 218): INFO Train: [161/200][500/6787] eta 0:25:46 lr 0.000200 time 0.2440 (0.2460) loss 0.3413 (0.3574) grad_norm 341399.0938 (259755.2344) mem 14543MB +[2023-10-13 07:41:47 simmim_pretrain](main_simmim.py 218): INFO Train: [161/200][1000/6787] eta 0:23:36 lr 0.000200 time 0.2438 (0.2449) loss 0.3743 (0.3578) grad_norm 247712.6250 (251756.7656) mem 14543MB +[2023-10-13 07:43:49 simmim_pretrain](main_simmim.py 218): INFO Train: [161/200][1500/6787] eta 0:21:33 lr 0.000200 time 0.2445 (0.2446) loss 0.3647 (0.3580) grad_norm 280618.3750 (250955.6250) mem 14543MB +[2023-10-13 07:45:51 simmim_pretrain](main_simmim.py 218): INFO Train: [161/200][2000/6787] eta 0:19:30 lr 0.000200 time 0.2439 (0.2444) loss 0.3601 (0.3578) grad_norm 215340.0938 (262470.1250) mem 14543MB +[2023-10-13 07:47:53 simmim_pretrain](main_simmim.py 218): INFO Train: [161/200][2500/6787] eta 0:17:27 lr 0.000200 time 0.2444 (0.2444) loss 0.3719 (0.3577) grad_norm 214835.4219 (279945.9062) mem 14543MB +[2023-10-13 07:49:56 simmim_pretrain](main_simmim.py 218): INFO Train: [161/200][3000/6787] eta 0:15:25 lr 0.000200 time 0.2454 (0.2445) loss 0.3598 (0.3575) grad_norm 385935.8438 (304813.8750) mem 14543MB +[2023-10-13 07:51:58 simmim_pretrain](main_simmim.py 218): INFO Train: [161/200][3500/6787] eta 0:13:24 lr 0.000200 time 0.2454 (0.2447) loss 0.3507 (0.3574) grad_norm 460918.9062 (330253.8438) mem 14543MB +[2023-10-13 07:54:01 simmim_pretrain](main_simmim.py 218): INFO Train: [161/200][4000/6787] eta 0:11:22 lr 0.000200 time 
0.2471 (0.2448) loss 0.3458 (0.3572) grad_norm 278269.8125 (inf) mem 14543MB +[2023-10-13 07:56:04 simmim_pretrain](main_simmim.py 218): INFO Train: [161/200][4500/6787] eta 0:09:20 lr 0.000200 time 0.2453 (0.2449) loss 0.3476 (0.3570) grad_norm 557877.1250 (inf) mem 14543MB +[2023-10-13 07:58:07 simmim_pretrain](main_simmim.py 218): INFO Train: [161/200][5000/6787] eta 0:07:17 lr 0.000200 time 0.2454 (0.2450) loss 0.3498 (0.3571) grad_norm 487547.6562 (inf) mem 14543MB +[2023-10-13 08:00:10 simmim_pretrain](main_simmim.py 218): INFO Train: [161/200][5500/6787] eta 0:05:15 lr 0.000200 time 0.2463 (0.2451) loss 0.3349 (0.3571) grad_norm 355191.7812 (inf) mem 14543MB +[2023-10-13 08:02:12 simmim_pretrain](main_simmim.py 218): INFO Train: [161/200][6000/6787] eta 0:03:12 lr 0.000200 time 0.2481 (0.2450) loss 0.3665 (0.3572) grad_norm 214859.5469 (inf) mem 14543MB +[2023-10-13 08:04:15 simmim_pretrain](main_simmim.py 218): INFO Train: [161/200][6500/6787] eta 0:01:10 lr 0.000200 time 0.2453 (0.2450) loss 0.3542 (0.3575) grad_norm 327978.1875 (inf) mem 14543MB +[2023-10-13 08:05:25 simmim_pretrain](main_simmim.py 228): INFO EPOCH 161 training takes 0:27:43 +[2023-10-13 08:05:27 simmim_pretrain](main_simmim.py 218): INFO Train: [162/200][0/6787] eta 2:28:05 lr 0.000200 time 1.3092 (1.3092) loss 0.3798 (0.3798) grad_norm 212816.1875 (212816.1875) mem 14543MB +[2023-10-13 08:07:29 simmim_pretrain](main_simmim.py 218): INFO Train: [162/200][500/6787] eta 0:25:53 lr 0.000200 time 0.2490 (0.2470) loss 0.3524 (0.3594) grad_norm 208015.3438 (255356.5000) mem 14543MB +[2023-10-13 08:09:32 simmim_pretrain](main_simmim.py 218): INFO Train: [162/200][1000/6787] eta 0:23:46 lr 0.000200 time 0.2445 (0.2464) loss 0.3522 (0.3579) grad_norm 313810.5000 (293862.6875) mem 14543MB +[2023-10-13 08:11:35 simmim_pretrain](main_simmim.py 218): INFO Train: [162/200][1500/6787] eta 0:21:41 lr 0.000200 time 0.2488 (0.2462) loss 0.3710 (0.3575) grad_norm 428011.2188 (323607.4062) mem 14543MB +[2023-10-13 08:13:38 simmim_pretrain](main_simmim.py 218): INFO Train: [162/200][2000/6787] eta 0:19:38 lr 0.000200 time 0.2456 (0.2461) loss 0.3625 (0.3572) grad_norm 470220.7812 (359089.8438) mem 14543MB +[2023-10-13 08:15:41 simmim_pretrain](main_simmim.py 218): INFO Train: [162/200][2500/6787] eta 0:17:35 lr 0.000200 time 0.2440 (0.2461) loss 0.3270 (0.3570) grad_norm 681758.8750 (inf) mem 14543MB +[2023-10-13 08:17:44 simmim_pretrain](main_simmim.py 218): INFO Train: [162/200][3000/6787] eta 0:15:31 lr 0.000200 time 0.2443 (0.2461) loss 0.3553 (0.3568) grad_norm 506910.2812 (inf) mem 14543MB +[2023-10-13 08:19:47 simmim_pretrain](main_simmim.py 218): INFO Train: [162/200][3500/6787] eta 0:13:28 lr 0.000200 time 0.2445 (0.2460) loss 0.3528 (0.3570) grad_norm 448595.9688 (inf) mem 14543MB +[2023-10-13 08:21:50 simmim_pretrain](main_simmim.py 218): INFO Train: [162/200][4000/6787] eta 0:11:25 lr 0.000200 time 0.2438 (0.2460) loss 0.3693 (0.3578) grad_norm 127302.8203 (inf) mem 14543MB +[2023-10-13 08:23:52 simmim_pretrain](main_simmim.py 218): INFO Train: [162/200][4500/6787] eta 0:09:22 lr 0.000200 time 0.2490 (0.2460) loss 0.3700 (0.3582) grad_norm 140054.6406 (inf) mem 14543MB +[2023-10-13 08:25:55 simmim_pretrain](main_simmim.py 218): INFO Train: [162/200][5000/6787] eta 0:07:19 lr 0.000200 time 0.2444 (0.2459) loss 0.3632 (0.3584) grad_norm 168065.4844 (inf) mem 14543MB +[2023-10-13 08:27:58 simmim_pretrain](main_simmim.py 218): INFO Train: [162/200][5500/6787] eta 0:05:16 lr 0.000200 time 0.2444 (0.2459) loss 0.3488 (0.3586) 
grad_norm 139496.7188 (inf) mem 14543MB
[nohup.out, epochs 162-180 condensed: SimMIM pre-training log (main_simmim.py, logger tag "simmim_pretrain"), 2023-10-13. Epochs [162/200] through [180/200], 6787 iterations per epoch, lr 0.000200 throughout, ~0.25 s per iteration (~28 min per epoch), mem 14543MB constant. Each INFO line reports eta, lr, per-iteration time, loss val (avg), grad_norm val (avg), and memory; only per-epoch wall time and the final running-average loss are kept here:
162: 0:27:49, 0.3587 | 163: 0:27:50, 0.3575 | 164: 0:27:50, 0.3584 | 165: 0:28:13, 0.3575 | 166: 0:28:34, 0.3579 | 167: 0:28:24, 0.3585 | 168: 0:28:30, 0.3576 | 169: 0:28:30, 0.3669 | 170: 0:28:26, 0.3582 | 171: 0:28:32, 0.3561 | 172: 0:28:27, 0.3566 | 173: 0:28:28, 0.3576 | 174: 0:28:34, 0.3582 | 175: 0:28:24, 0.3564 | 176: 0:28:30, 0.3578 | 177: 0:28:29, 0.3576 | 178: 0:28:26, 0.3578 | 179: 0:28:53, 0.3589 | 180: 0:28:29, 0.3594.
Notable events: a per-iteration loss spike to 0.5572 at epoch 169, iteration 3500, lifts the running average to ~0.368 before it recovers over the following epochs, with per-iteration grad_norm dropping by roughly an order of magnitude (from ~3e5 to ~2e4-6e4) for the remainder of that epoch; the running average of grad_norm saturates at inf partway through most epochs, and reads nan late in epoch 177 (iterations 6000-6500) and from epoch 180 iteration 2500 onward.]
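[Editorial note, not part of nohup.out: the bracketed number in lines such as "grad_norm 139496.7188 (inf)" is a running mean over the epoch, in the usual "val (avg)" AverageMeter logging convention. The logging code itself is not part of this diff, so the sketch below is an illustrative reconstruction, not the repository's own class. It shows why a single overflowing batch pins the average at inf for the rest of the epoch (and a single nan turns it into nan), even while the per-iteration values stay finite, exactly as seen above:

class AverageMeter:
    """Tracks the latest value and a running mean, printed as 'val (avg)'."""
    def __init__(self):
        self.val, self.sum, self.count = 0.0, 0.0, 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n   # an inf (or nan) added here never leaves the sum
        self.count += n

    @property
    def avg(self):
        return self.sum / self.count

meter = AverageMeter()
for v in (139496.7188, float("inf"), 191229.7031):
    meter.update(v)
    print(f"grad_norm {meter.val:.4f} ({meter.avg})")

From the overflowing step onward the mean prints as inf while the per-step value recovers immediately, which is why the log can show finite grad_norm values against an (inf) or (nan) average.]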
[Checkpoint after epoch 180 (utils.py 62/64): /root/autodl-tmp/LSQ-simmim/checkpoint/simmim_pretrain/vit_simmim/ckpt_epoch_180.pth saving...... then saved !!!
Epochs 181-185 condensed, same configuration (lr 0.000200, 6787 iterations per epoch, mem 14543MB); per-epoch wall time and final running-average loss: 181: 0:28:36, 0.3566 | 182: 0:28:51, 0.3582 | 183: 0:28:45, 0.3564 | 184: 0:28:33, 0.3557 | 185: 0:28:37, 0.3568. Epoch 186 runs through iteration 6500 (running-average loss 0.3588, running grad_norm average inf from iteration 4000 onward) before this excerpt cuts off mid-entry.]
+[2023-10-13 19:56:52
simmim_pretrain](main_simmim.py 228): INFO EPOCH 186 training takes 0:28:35 +[2023-10-13 19:56:54 simmim_pretrain](main_simmim.py 218): INFO Train: [187/200][0/6787] eta 2:46:52 lr 0.000200 time 1.4753 (1.4753) loss 0.3656 (0.3656) grad_norm 76636.9141 (76636.9141) mem 14543MB +[2023-10-13 19:59:01 simmim_pretrain](main_simmim.py 218): INFO Train: [187/200][500/6787] eta 0:26:49 lr 0.000200 time 0.2531 (0.2561) loss 0.3592 (0.3586) grad_norm 88129.7109 (82246.4219) mem 14543MB +[2023-10-13 20:01:08 simmim_pretrain](main_simmim.py 218): INFO Train: [187/200][1000/6787] eta 0:24:40 lr 0.000200 time 0.2597 (0.2559) loss 0.3422 (0.3589) grad_norm 49909.4648 (87997.9141) mem 14543MB +[2023-10-13 20:03:18 simmim_pretrain](main_simmim.py 218): INFO Train: [187/200][1500/6787] eta 0:22:38 lr 0.000200 time 0.2594 (0.2570) loss 0.3563 (0.3585) grad_norm 94033.5625 (91824.0156) mem 14543MB +[2023-10-13 20:05:28 simmim_pretrain](main_simmim.py 218): INFO Train: [187/200][2000/6787] eta 0:20:33 lr 0.000200 time 0.2597 (0.2576) loss 0.3535 (0.3582) grad_norm 272624.5625 (105814.9453) mem 14543MB +[2023-10-13 20:07:37 simmim_pretrain](main_simmim.py 218): INFO Train: [187/200][2500/6787] eta 0:18:25 lr 0.000200 time 0.2598 (0.2579) loss 0.3467 (0.3580) grad_norm 93059.9844 (115807.8359) mem 14543MB +[2023-10-13 20:09:47 simmim_pretrain](main_simmim.py 218): INFO Train: [187/200][3000/6787] eta 0:16:17 lr 0.000200 time 0.2595 (0.2581) loss 0.3531 (0.3578) grad_norm 241058.3750 (124323.6406) mem 14543MB +[2023-10-13 20:11:56 simmim_pretrain](main_simmim.py 218): INFO Train: [187/200][3500/6787] eta 0:14:08 lr 0.000200 time 0.2598 (0.2583) loss 0.3767 (0.3577) grad_norm 165201.0469 (133047.0938) mem 14543MB +[2023-10-13 20:14:06 simmim_pretrain](main_simmim.py 218): INFO Train: [187/200][4000/6787] eta 0:12:00 lr 0.000200 time 0.2593 (0.2584) loss 0.3584 (0.3574) grad_norm 199413.0781 (156939.2969) mem 14543MB +[2023-10-13 20:16:15 simmim_pretrain](main_simmim.py 218): INFO Train: [187/200][4500/6787] eta 0:09:50 lr 0.000200 time 0.2562 (0.2584) loss 0.3548 (0.3572) grad_norm 185677.5312 (167241.2500) mem 14543MB +[2023-10-13 20:18:25 simmim_pretrain](main_simmim.py 218): INFO Train: [187/200][5000/6787] eta 0:07:41 lr 0.000200 time 0.2588 (0.2585) loss 0.3616 (0.3570) grad_norm 582537.6875 (183261.5938) mem 14543MB +[2023-10-13 20:20:34 simmim_pretrain](main_simmim.py 218): INFO Train: [187/200][5500/6787] eta 0:05:32 lr 0.000200 time 0.2594 (0.2585) loss 0.3577 (0.3568) grad_norm 305145.8125 (inf) mem 14543MB +[2023-10-13 20:22:43 simmim_pretrain](main_simmim.py 218): INFO Train: [187/200][6000/6787] eta 0:03:23 lr 0.000200 time 0.2545 (0.2585) loss 0.3639 (0.3567) grad_norm 464761.6875 (inf) mem 14543MB +[2023-10-13 20:24:53 simmim_pretrain](main_simmim.py 218): INFO Train: [187/200][6500/6787] eta 0:01:14 lr 0.000200 time 0.2593 (0.2585) loss 0.3832 (0.3566) grad_norm 529095.9375 (inf) mem 14543MB +[2023-10-13 20:26:07 simmim_pretrain](main_simmim.py 228): INFO EPOCH 187 training takes 0:29:14 +[2023-10-13 20:26:09 simmim_pretrain](main_simmim.py 218): INFO Train: [188/200][0/6787] eta 2:54:05 lr 0.000200 time 1.5391 (1.5391) loss 0.3391 (0.3391) grad_norm 273062.9375 (273062.9375) mem 14543MB +[2023-10-13 20:28:14 simmim_pretrain](main_simmim.py 218): INFO Train: [188/200][500/6787] eta 0:26:28 lr 0.000200 time 0.2464 (0.2527) loss 0.3780 (0.3552) grad_norm 419497.4688 (370591.6875) mem 14543MB +[2023-10-13 20:30:19 simmim_pretrain](main_simmim.py 218): INFO Train: [188/200][1000/6787] eta 0:24:17 lr 
0.000200 time 0.2523 (0.2519) loss 0.3454 (0.3552) grad_norm 464307.2188 (inf) mem 14543MB +[2023-10-13 20:32:25 simmim_pretrain](main_simmim.py 218): INFO Train: [188/200][1500/6787] eta 0:22:10 lr 0.000200 time 0.2525 (0.2517) loss 0.3534 (0.3554) grad_norm 277651.9062 (inf) mem 14543MB +[2023-10-13 20:34:31 simmim_pretrain](main_simmim.py 218): INFO Train: [188/200][2000/6787] eta 0:20:04 lr 0.000200 time 0.2511 (0.2515) loss 0.3667 (0.3554) grad_norm 487617.0000 (inf) mem 14543MB +[2023-10-13 20:36:36 simmim_pretrain](main_simmim.py 218): INFO Train: [188/200][2500/6787] eta 0:17:58 lr 0.000200 time 0.2559 (0.2515) loss 0.3490 (0.3554) grad_norm 323333.7500 (inf) mem 14543MB +[2023-10-13 20:38:42 simmim_pretrain](main_simmim.py 218): INFO Train: [188/200][3000/6787] eta 0:15:52 lr 0.000200 time 0.2493 (0.2516) loss 0.3668 (0.3551) grad_norm 201557.6719 (inf) mem 14543MB +[2023-10-13 20:40:48 simmim_pretrain](main_simmim.py 218): INFO Train: [188/200][3500/6787] eta 0:13:47 lr 0.000200 time 0.2510 (0.2516) loss 0.3528 (0.3555) grad_norm 288090.2500 (inf) mem 14543MB +[2023-10-13 20:42:55 simmim_pretrain](main_simmim.py 218): INFO Train: [188/200][4000/6787] eta 0:11:41 lr 0.000200 time 0.2502 (0.2518) loss 0.3497 (0.3556) grad_norm 362685.1250 (inf) mem 14543MB +[2023-10-13 20:45:01 simmim_pretrain](main_simmim.py 218): INFO Train: [188/200][4500/6787] eta 0:09:35 lr 0.000200 time 0.2475 (0.2519) loss 0.3604 (0.3561) grad_norm 236826.0781 (inf) mem 14543MB +[2023-10-13 20:47:07 simmim_pretrain](main_simmim.py 218): INFO Train: [188/200][5000/6787] eta 0:07:30 lr 0.000200 time 0.2497 (0.2520) loss 0.3697 (0.3564) grad_norm 159493.6719 (inf) mem 14543MB +[2023-10-13 20:49:14 simmim_pretrain](main_simmim.py 218): INFO Train: [188/200][5500/6787] eta 0:05:24 lr 0.000200 time 0.2567 (0.2521) loss 0.3813 (0.3566) grad_norm 121574.4219 (inf) mem 14543MB +[2023-10-13 20:51:24 simmim_pretrain](main_simmim.py 218): INFO Train: [188/200][6000/6787] eta 0:03:18 lr 0.000200 time 0.2604 (0.2528) loss 0.3527 (0.3568) grad_norm 137406.7500 (inf) mem 14543MB +[2023-10-13 20:53:34 simmim_pretrain](main_simmim.py 218): INFO Train: [188/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2609 (0.2533) loss 0.3362 (0.3569) grad_norm 158516.0938 (inf) mem 14543MB +[2023-10-13 20:54:49 simmim_pretrain](main_simmim.py 228): INFO EPOCH 188 training takes 0:28:42 +[2023-10-13 20:54:51 simmim_pretrain](main_simmim.py 218): INFO Train: [189/200][0/6787] eta 2:55:21 lr 0.000200 time 1.5502 (1.5502) loss 0.3753 (0.3753) grad_norm 213029.8750 (213029.8750) mem 14543MB +[2023-10-13 20:56:57 simmim_pretrain](main_simmim.py 218): INFO Train: [189/200][500/6787] eta 0:26:40 lr 0.000200 time 0.2498 (0.2545) loss 0.3873 (0.3582) grad_norm 196106.9375 (164673.0625) mem 14543MB +[2023-10-13 20:59:03 simmim_pretrain](main_simmim.py 218): INFO Train: [189/200][1000/6787] eta 0:24:26 lr 0.000200 time 0.2503 (0.2535) loss 0.3613 (0.3573) grad_norm 101362.5469 (167330.4062) mem 14543MB +[2023-10-13 21:01:09 simmim_pretrain](main_simmim.py 218): INFO Train: [189/200][1500/6787] eta 0:22:18 lr 0.000200 time 0.2533 (0.2531) loss 0.3648 (0.3570) grad_norm 204668.3750 (182612.4219) mem 14543MB +[2023-10-13 21:03:15 simmim_pretrain](main_simmim.py 218): INFO Train: [189/200][2000/6787] eta 0:20:10 lr 0.000200 time 0.2559 (0.2529) loss 0.3515 (0.3564) grad_norm 213556.3750 (220901.6406) mem 14543MB +[2023-10-13 21:05:21 simmim_pretrain](main_simmim.py 218): INFO Train: [189/200][2500/6787] eta 0:18:03 lr 0.000200 time 0.2458 (0.2527) loss 
0.3596 (0.3565) grad_norm 338358.3125 (237615.6719) mem 14543MB +[2023-10-13 21:07:27 simmim_pretrain](main_simmim.py 218): INFO Train: [189/200][3000/6787] eta 0:15:56 lr 0.000200 time 0.2518 (0.2526) loss 0.3719 (0.3564) grad_norm 306506.5000 (253917.4375) mem 14543MB +[2023-10-13 21:09:33 simmim_pretrain](main_simmim.py 218): INFO Train: [189/200][3500/6787] eta 0:13:49 lr 0.000200 time 0.2520 (0.2524) loss 0.3631 (0.3562) grad_norm 173564.6406 (inf) mem 14543MB +[2023-10-13 21:11:39 simmim_pretrain](main_simmim.py 218): INFO Train: [189/200][4000/6787] eta 0:11:43 lr 0.000200 time 0.2589 (0.2523) loss 0.3540 (0.3560) grad_norm 248149.7188 (inf) mem 14543MB +[2023-10-13 21:13:45 simmim_pretrain](main_simmim.py 218): INFO Train: [189/200][4500/6787] eta 0:09:36 lr 0.000200 time 0.2499 (0.2522) loss 0.3646 (0.3561) grad_norm 147302.4688 (inf) mem 14543MB +[2023-10-13 21:15:50 simmim_pretrain](main_simmim.py 218): INFO Train: [189/200][5000/6787] eta 0:07:30 lr 0.000200 time 0.2498 (0.2521) loss 0.3300 (0.3562) grad_norm 201229.4688 (inf) mem 14543MB +[2023-10-13 21:17:56 simmim_pretrain](main_simmim.py 218): INFO Train: [189/200][5500/6787] eta 0:05:24 lr 0.000200 time 0.2473 (0.2520) loss 0.3655 (0.3562) grad_norm 155365.4062 (inf) mem 14543MB +[2023-10-13 21:20:02 simmim_pretrain](main_simmim.py 218): INFO Train: [189/200][6000/6787] eta 0:03:18 lr 0.000200 time 0.2486 (0.2520) loss 0.3461 (0.3562) grad_norm 219934.7344 (inf) mem 14543MB +[2023-10-13 21:22:08 simmim_pretrain](main_simmim.py 218): INFO Train: [189/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2486 (0.2520) loss 0.3544 (0.3562) grad_norm 340285.9375 (inf) mem 14543MB +[2023-10-13 21:23:21 simmim_pretrain](main_simmim.py 228): INFO EPOCH 189 training takes 0:28:31 +[2023-10-13 21:23:22 simmim_pretrain](main_simmim.py 218): INFO Train: [190/200][0/6787] eta 2:47:47 lr 0.000200 time 1.4833 (1.4833) loss 0.3549 (0.3549) grad_norm 461040.5938 (461040.5938) mem 14543MB +[2023-10-13 21:25:28 simmim_pretrain](main_simmim.py 218): INFO Train: [190/200][500/6787] eta 0:26:40 lr 0.000200 time 0.2508 (0.2545) loss 0.3486 (0.3555) grad_norm 215064.6562 (430273.7812) mem 14543MB +[2023-10-13 21:27:35 simmim_pretrain](main_simmim.py 218): INFO Train: [190/200][1000/6787] eta 0:24:30 lr 0.000200 time 0.2550 (0.2541) loss 0.3626 (0.3554) grad_norm 332880.7812 (382086.2812) mem 14543MB +[2023-10-13 21:29:43 simmim_pretrain](main_simmim.py 218): INFO Train: [190/200][1500/6787] eta 0:22:27 lr 0.000200 time 0.2614 (0.2548) loss 0.3505 (0.3559) grad_norm 192371.3438 (inf) mem 14543MB +[2023-10-13 21:31:53 simmim_pretrain](main_simmim.py 218): INFO Train: [190/200][2000/6787] eta 0:20:26 lr 0.000200 time 0.2606 (0.2561) loss 0.3514 (0.3561) grad_norm 161280.6250 (inf) mem 14543MB +[2023-10-13 21:34:03 simmim_pretrain](main_simmim.py 218): INFO Train: [190/200][2500/6787] eta 0:18:21 lr 0.000200 time 0.2607 (0.2570) loss 0.3565 (0.3562) grad_norm 227372.6562 (inf) mem 14543MB +[2023-10-13 21:36:13 simmim_pretrain](main_simmim.py 218): INFO Train: [190/200][3000/6787] eta 0:16:15 lr 0.000200 time 0.2610 (0.2575) loss 0.3653 (0.3564) grad_norm 254291.0469 (inf) mem 14543MB +[2023-10-13 21:38:23 simmim_pretrain](main_simmim.py 218): INFO Train: [190/200][3500/6787] eta 0:14:06 lr 0.000200 time 0.2578 (0.2576) loss 0.3707 (0.3563) grad_norm 338453.4062 (inf) mem 14543MB +[2023-10-13 21:40:31 simmim_pretrain](main_simmim.py 218): INFO Train: [190/200][4000/6787] eta 0:11:57 lr 0.000200 time 0.2583 (0.2576) loss 0.3496 (0.3561) grad_norm 501778.2188 
(inf) mem 14543MB +[2023-10-13 21:42:39 simmim_pretrain](main_simmim.py 218): INFO Train: [190/200][4500/6787] eta 0:09:48 lr 0.000200 time 0.2537 (0.2575) loss 0.3437 (0.3560) grad_norm 259197.0156 (inf) mem 14543MB +[2023-10-13 21:44:47 simmim_pretrain](main_simmim.py 218): INFO Train: [190/200][5000/6787] eta 0:07:39 lr 0.000200 time 0.2534 (0.2572) loss 0.3572 (0.3561) grad_norm 269465.2188 (inf) mem 14543MB +[2023-10-13 21:46:54 simmim_pretrain](main_simmim.py 218): INFO Train: [190/200][5500/6787] eta 0:05:30 lr 0.000200 time 0.2550 (0.2568) loss 0.3593 (0.3563) grad_norm 215768.2656 (inf) mem 14543MB +[2023-10-13 21:49:02 simmim_pretrain](main_simmim.py 218): INFO Train: [190/200][6000/6787] eta 0:03:22 lr 0.000200 time 0.2560 (0.2568) loss 0.3516 (0.3563) grad_norm 355818.0000 (inf) mem 14543MB +[2023-10-13 21:51:11 simmim_pretrain](main_simmim.py 218): INFO Train: [190/200][6500/6787] eta 0:01:13 lr 0.000200 time 0.2606 (0.2570) loss 0.3378 (0.3563) grad_norm 325573.3750 (inf) mem 14543MB +[2023-10-13 21:52:26 simmim_pretrain](main_simmim.py 228): INFO EPOCH 190 training takes 0:29:05 +[2023-10-13 21:52:28 simmim_pretrain](main_simmim.py 218): INFO Train: [191/200][0/6787] eta 2:49:50 lr 0.000200 time 1.5015 (1.5015) loss 0.3556 (0.3556) grad_norm 149541.4844 (149541.4844) mem 14543MB +[2023-10-13 21:54:34 simmim_pretrain](main_simmim.py 218): INFO Train: [191/200][500/6787] eta 0:26:38 lr 0.000200 time 0.2570 (0.2542) loss 0.3558 (0.3555) grad_norm 366313.5625 (309579.9375) mem 14543MB +[2023-10-13 21:56:40 simmim_pretrain](main_simmim.py 218): INFO Train: [191/200][1000/6787] eta 0:24:24 lr 0.000200 time 0.2556 (0.2531) loss 0.3511 (0.3556) grad_norm 473457.6875 (352328.5938) mem 14543MB +[2023-10-13 21:58:45 simmim_pretrain](main_simmim.py 218): INFO Train: [191/200][1500/6787] eta 0:22:15 lr 0.000200 time 0.2529 (0.2526) loss 0.3497 (0.3557) grad_norm 276720.4375 (360801.7500) mem 14543MB +[2023-10-13 22:00:51 simmim_pretrain](main_simmim.py 218): INFO Train: [191/200][2000/6787] eta 0:20:08 lr 0.000200 time 0.2532 (0.2524) loss 0.3606 (0.3557) grad_norm 346433.0312 (379431.5625) mem 14543MB +[2023-10-13 22:02:57 simmim_pretrain](main_simmim.py 218): INFO Train: [191/200][2500/6787] eta 0:18:00 lr 0.000200 time 0.2505 (0.2521) loss 0.3647 (0.3556) grad_norm 614310.3750 (inf) mem 14543MB +[2023-10-13 22:05:03 simmim_pretrain](main_simmim.py 218): INFO Train: [191/200][3000/6787] eta 0:15:54 lr 0.000200 time 0.2525 (0.2521) loss 0.3710 (0.3556) grad_norm 207754.6562 (inf) mem 14543MB +[2023-10-13 22:07:09 simmim_pretrain](main_simmim.py 218): INFO Train: [191/200][3500/6787] eta 0:13:48 lr 0.000200 time 0.2523 (0.2521) loss 0.3568 (0.3553) grad_norm 350684.7812 (inf) mem 14543MB +[2023-10-13 22:09:15 simmim_pretrain](main_simmim.py 218): INFO Train: [191/200][4000/6787] eta 0:11:42 lr 0.000200 time 0.2524 (0.2520) loss 0.3558 (0.3553) grad_norm 399651.0625 (inf) mem 14543MB +[2023-10-13 22:11:21 simmim_pretrain](main_simmim.py 218): INFO Train: [191/200][4500/6787] eta 0:09:36 lr 0.000200 time 0.2531 (0.2520) loss 0.3522 (0.3552) grad_norm 431535.2188 (inf) mem 14543MB +[2023-10-13 22:13:27 simmim_pretrain](main_simmim.py 218): INFO Train: [191/200][5000/6787] eta 0:07:30 lr 0.000200 time 0.2496 (0.2521) loss 0.3541 (0.3552) grad_norm 548871.8125 (inf) mem 14543MB +[2023-10-13 22:15:33 simmim_pretrain](main_simmim.py 218): INFO Train: [191/200][5500/6787] eta 0:05:24 lr 0.000200 time 0.2545 (0.2522) loss 0.3605 (0.3552) grad_norm 521067.8750 (inf) mem 14543MB +[2023-10-13 
22:17:40 simmim_pretrain](main_simmim.py 218): INFO Train: [191/200][6000/6787] eta 0:03:18 lr 0.000200 time 0.2591 (0.2523) loss 0.3404 (0.3551) grad_norm 401392.2812 (inf) mem 14543MB +[2023-10-13 22:19:49 simmim_pretrain](main_simmim.py 218): INFO Train: [191/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2488 (0.2527) loss 0.3548 (0.3552) grad_norm 521799.0625 (inf) mem 14543MB +[2023-10-13 22:21:03 simmim_pretrain](main_simmim.py 228): INFO EPOCH 191 training takes 0:28:36 +[2023-10-13 22:21:04 simmim_pretrain](main_simmim.py 218): INFO Train: [192/200][0/6787] eta 2:47:37 lr 0.000200 time 1.4819 (1.4819) loss 0.3454 (0.3454) grad_norm 344808.4062 (344808.4062) mem 14543MB +[2023-10-13 22:23:11 simmim_pretrain](main_simmim.py 218): INFO Train: [192/200][500/6787] eta 0:26:49 lr 0.000200 time 0.2547 (0.2559) loss 0.3241 (0.3555) grad_norm 376958.5000 (nan) mem 14543MB +[2023-10-13 22:25:18 simmim_pretrain](main_simmim.py 218): INFO Train: [192/200][1000/6787] eta 0:24:35 lr 0.000200 time 0.2554 (0.2550) loss 0.3508 (0.3562) grad_norm 382849.2500 (nan) mem 14543MB +[2023-10-13 22:27:26 simmim_pretrain](main_simmim.py 218): INFO Train: [192/200][1500/6787] eta 0:22:29 lr 0.000200 time 0.2555 (0.2553) loss 0.3584 (0.3565) grad_norm 196838.1719 (nan) mem 14543MB +[2023-10-13 22:29:34 simmim_pretrain](main_simmim.py 218): INFO Train: [192/200][2000/6787] eta 0:20:22 lr 0.000200 time 0.2591 (0.2555) loss 0.3198 (0.3567) grad_norm 351823.6875 (nan) mem 14543MB +[2023-10-13 22:31:42 simmim_pretrain](main_simmim.py 218): INFO Train: [192/200][2500/6787] eta 0:18:14 lr 0.000200 time 0.2484 (0.2554) loss 0.3607 (0.3567) grad_norm 386591.3125 (nan) mem 14543MB +[2023-10-13 22:33:48 simmim_pretrain](main_simmim.py 218): INFO Train: [192/200][3000/6787] eta 0:16:05 lr 0.000200 time 0.2525 (0.2549) loss 0.3527 (0.3570) grad_norm 348804.7812 (nan) mem 14543MB +[2023-10-13 22:35:55 simmim_pretrain](main_simmim.py 218): INFO Train: [192/200][3500/6787] eta 0:13:57 lr 0.000200 time 0.2525 (0.2548) loss 0.3692 (0.3570) grad_norm 431138.1875 (nan) mem 14543MB +[2023-10-13 22:38:02 simmim_pretrain](main_simmim.py 218): INFO Train: [192/200][4000/6787] eta 0:11:50 lr 0.000200 time 0.2527 (0.2548) loss 0.3463 (0.3574) grad_norm 212839.6562 (nan) mem 14543MB +[2023-10-13 22:40:10 simmim_pretrain](main_simmim.py 218): INFO Train: [192/200][4500/6787] eta 0:09:42 lr 0.000200 time 0.2592 (0.2548) loss 0.3679 (0.3576) grad_norm 129881.5234 (nan) mem 14543MB +[2023-10-13 22:42:17 simmim_pretrain](main_simmim.py 218): INFO Train: [192/200][5000/6787] eta 0:07:35 lr 0.000200 time 0.2544 (0.2547) loss 0.3476 (0.3578) grad_norm 98268.8984 (nan) mem 14543MB +[2023-10-13 22:44:24 simmim_pretrain](main_simmim.py 218): INFO Train: [192/200][5500/6787] eta 0:05:27 lr 0.000200 time 0.2532 (0.2546) loss 0.3521 (0.3579) grad_norm 127697.8984 (nan) mem 14543MB +[2023-10-13 22:46:30 simmim_pretrain](main_simmim.py 218): INFO Train: [192/200][6000/6787] eta 0:03:20 lr 0.000200 time 0.2520 (0.2545) loss 0.3685 (0.3580) grad_norm 287933.2812 (nan) mem 14543MB +[2023-10-13 22:48:37 simmim_pretrain](main_simmim.py 218): INFO Train: [192/200][6500/6787] eta 0:01:13 lr 0.000200 time 0.2670 (0.2545) loss 0.3481 (0.3579) grad_norm 139591.9688 (nan) mem 14543MB +[2023-10-13 22:49:51 simmim_pretrain](main_simmim.py 228): INFO EPOCH 192 training takes 0:28:47 +[2023-10-13 22:49:52 simmim_pretrain](main_simmim.py 218): INFO Train: [193/200][0/6787] eta 2:59:08 lr 0.000200 time 1.5836 (1.5836) loss 0.3680 (0.3680) grad_norm 275102.9375 
(275102.9375) mem 14543MB +[2023-10-13 22:51:58 simmim_pretrain](main_simmim.py 218): INFO Train: [193/200][500/6787] eta 0:26:32 lr 0.000200 time 0.2488 (0.2534) loss 0.3592 (0.3567) grad_norm 83196.3984 (220798.6875) mem 14543MB +[2023-10-13 22:54:03 simmim_pretrain](main_simmim.py 218): INFO Train: [193/200][1000/6787] eta 0:24:20 lr 0.000200 time 0.2491 (0.2523) loss 0.3747 (0.3567) grad_norm 225868.1094 (inf) mem 14543MB +[2023-10-13 22:56:09 simmim_pretrain](main_simmim.py 218): INFO Train: [193/200][1500/6787] eta 0:22:14 lr 0.000200 time 0.2587 (0.2523) loss 0.3597 (0.3566) grad_norm 255076.7656 (inf) mem 14543MB +[2023-10-13 22:58:16 simmim_pretrain](main_simmim.py 218): INFO Train: [193/200][2000/6787] eta 0:20:08 lr 0.000200 time 0.2533 (0.2524) loss 0.3727 (0.3567) grad_norm 349562.0625 (inf) mem 14543MB +[2023-10-13 23:00:22 simmim_pretrain](main_simmim.py 218): INFO Train: [193/200][2500/6787] eta 0:18:02 lr 0.000200 time 0.2521 (0.2525) loss 0.3688 (0.3566) grad_norm 237473.4375 (inf) mem 14543MB +[2023-10-13 23:02:29 simmim_pretrain](main_simmim.py 218): INFO Train: [193/200][3000/6787] eta 0:15:56 lr 0.000200 time 0.2535 (0.2526) loss 0.3460 (0.3567) grad_norm 256213.2969 (inf) mem 14543MB +[2023-10-13 23:04:35 simmim_pretrain](main_simmim.py 218): INFO Train: [193/200][3500/6787] eta 0:13:50 lr 0.000200 time 0.2523 (0.2526) loss 0.3469 (0.3566) grad_norm 325910.7500 (inf) mem 14543MB +[2023-10-13 23:06:42 simmim_pretrain](main_simmim.py 218): INFO Train: [193/200][4000/6787] eta 0:11:44 lr 0.000200 time 0.2497 (0.2528) loss 0.3539 (0.3566) grad_norm 326580.2500 (inf) mem 14543MB +[2023-10-13 23:08:49 simmim_pretrain](main_simmim.py 218): INFO Train: [193/200][4500/6787] eta 0:09:38 lr 0.000200 time 0.2595 (0.2529) loss 0.3620 (0.3567) grad_norm 511985.5938 (inf) mem 14543MB +[2023-10-13 23:10:56 simmim_pretrain](main_simmim.py 218): INFO Train: [193/200][5000/6787] eta 0:07:32 lr 0.000200 time 0.2581 (0.2530) loss 0.3690 (0.3566) grad_norm 230771.1406 (inf) mem 14543MB +[2023-10-13 23:13:06 simmim_pretrain](main_simmim.py 218): INFO Train: [193/200][5500/6787] eta 0:05:26 lr 0.000200 time 0.2574 (0.2536) loss 0.3772 (0.3566) grad_norm 206221.6406 (inf) mem 14543MB +[2023-10-13 23:15:16 simmim_pretrain](main_simmim.py 218): INFO Train: [193/200][6000/6787] eta 0:03:20 lr 0.000200 time 0.2594 (0.2542) loss 0.3603 (0.3566) grad_norm 444114.5625 (inf) mem 14543MB +[2023-10-13 23:17:26 simmim_pretrain](main_simmim.py 218): INFO Train: [193/200][6500/6787] eta 0:01:13 lr 0.000200 time 0.2514 (0.2546) loss 0.3502 (0.3566) grad_norm 208559.3594 (inf) mem 14543MB +[2023-10-13 23:18:41 simmim_pretrain](main_simmim.py 228): INFO EPOCH 193 training takes 0:28:50 +[2023-10-13 23:18:43 simmim_pretrain](main_simmim.py 218): INFO Train: [194/200][0/6787] eta 2:55:35 lr 0.000200 time 1.5523 (1.5523) loss 0.3652 (0.3652) grad_norm 205349.2188 (205349.2188) mem 14543MB +[2023-10-13 23:20:48 simmim_pretrain](main_simmim.py 218): INFO Train: [194/200][500/6787] eta 0:26:37 lr 0.000200 time 0.2505 (0.2540) loss 0.3431 (0.3554) grad_norm 492198.5000 (405991.1250) mem 14543MB +[2023-10-13 23:22:54 simmim_pretrain](main_simmim.py 218): INFO Train: [194/200][1000/6787] eta 0:24:22 lr 0.000200 time 0.2566 (0.2528) loss 0.3482 (0.3545) grad_norm 363155.5625 (inf) mem 14543MB +[2023-10-13 23:25:00 simmim_pretrain](main_simmim.py 218): INFO Train: [194/200][1500/6787] eta 0:22:15 lr 0.000200 time 0.2500 (0.2526) loss 0.3323 (0.3549) grad_norm 292055.9375 (inf) mem 14543MB +[2023-10-13 23:27:06 
simmim_pretrain](main_simmim.py 218): INFO Train: [194/200][2000/6787] eta 0:20:08 lr 0.000200 time 0.2534 (0.2525) loss 0.3583 (0.3561) grad_norm 162312.2188 (inf) mem 14543MB +[2023-10-13 23:29:12 simmim_pretrain](main_simmim.py 218): INFO Train: [194/200][2500/6787] eta 0:18:01 lr 0.000200 time 0.2545 (0.2524) loss 0.3259 (0.3568) grad_norm 81559.5859 (inf) mem 14543MB +[2023-10-13 23:31:18 simmim_pretrain](main_simmim.py 218): INFO Train: [194/200][3000/6787] eta 0:15:55 lr 0.000200 time 0.2588 (0.2523) loss 0.3535 (0.3572) grad_norm 146313.4688 (inf) mem 14543MB +[2023-10-13 23:33:24 simmim_pretrain](main_simmim.py 218): INFO Train: [194/200][3500/6787] eta 0:13:48 lr 0.000200 time 0.2511 (0.2522) loss 0.3351 (0.3575) grad_norm 96472.0469 (inf) mem 14543MB +[2023-10-13 23:35:30 simmim_pretrain](main_simmim.py 218): INFO Train: [194/200][4000/6787] eta 0:11:42 lr 0.000200 time 0.2548 (0.2521) loss 0.3509 (0.3578) grad_norm 187762.5469 (inf) mem 14543MB +[2023-10-13 23:37:36 simmim_pretrain](main_simmim.py 218): INFO Train: [194/200][4500/6787] eta 0:09:36 lr 0.000200 time 0.2474 (0.2521) loss 0.3610 (0.3576) grad_norm 105905.7109 (inf) mem 14543MB +[2023-10-13 23:39:42 simmim_pretrain](main_simmim.py 218): INFO Train: [194/200][5000/6787] eta 0:07:30 lr 0.000200 time 0.2551 (0.2520) loss 0.3624 (0.3575) grad_norm 294943.5938 (inf) mem 14543MB +[2023-10-13 23:41:48 simmim_pretrain](main_simmim.py 218): INFO Train: [194/200][5500/6787] eta 0:05:24 lr 0.000200 time 0.2506 (0.2521) loss 0.3491 (0.3575) grad_norm 188075.0781 (inf) mem 14543MB +[2023-10-13 23:43:54 simmim_pretrain](main_simmim.py 218): INFO Train: [194/200][6000/6787] eta 0:03:18 lr 0.000200 time 0.2591 (0.2522) loss 0.3640 (0.3573) grad_norm 393169.8125 (inf) mem 14543MB +[2023-10-13 23:46:01 simmim_pretrain](main_simmim.py 218): INFO Train: [194/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2490 (0.2522) loss 0.3753 (0.3572) grad_norm 255751.1562 (inf) mem 14543MB +[2023-10-13 23:47:14 simmim_pretrain](main_simmim.py 228): INFO EPOCH 194 training takes 0:28:32 +[2023-10-13 23:47:15 simmim_pretrain](main_simmim.py 218): INFO Train: [195/200][0/6787] eta 2:56:02 lr 0.000200 time 1.5563 (1.5563) loss 0.3615 (0.3615) grad_norm 336096.2188 (336096.2188) mem 14543MB +[2023-10-13 23:49:22 simmim_pretrain](main_simmim.py 218): INFO Train: [195/200][500/6787] eta 0:26:43 lr 0.000200 time 0.2523 (0.2551) loss 0.3388 (0.3559) grad_norm 507365.3750 (378207.1875) mem 14543MB +[2023-10-13 23:51:28 simmim_pretrain](main_simmim.py 218): INFO Train: [195/200][1000/6787] eta 0:24:29 lr 0.000200 time 0.2502 (0.2539) loss 0.3607 (0.3554) grad_norm 544352.3750 (inf) mem 14543MB +[2023-10-13 23:53:36 simmim_pretrain](main_simmim.py 218): INFO Train: [195/200][1500/6787] eta 0:22:27 lr 0.000200 time 0.2593 (0.2548) loss 0.3520 (0.3558) grad_norm 189888.5469 (inf) mem 14543MB +[2023-10-13 23:55:46 simmim_pretrain](main_simmim.py 218): INFO Train: [195/200][2000/6787] eta 0:20:26 lr 0.000200 time 0.2595 (0.2562) loss 0.3638 (0.3560) grad_norm 131617.6719 (inf) mem 14543MB +[2023-10-13 23:57:56 simmim_pretrain](main_simmim.py 218): INFO Train: [195/200][2500/6787] eta 0:18:21 lr 0.000200 time 0.2497 (0.2569) loss 0.3335 (0.3562) grad_norm 218092.7656 (inf) mem 14543MB +[2023-10-14 00:00:03 simmim_pretrain](main_simmim.py 218): INFO Train: [195/200][3000/6787] eta 0:16:11 lr 0.000200 time 0.2558 (0.2565) loss 0.3648 (0.3563) grad_norm 384229.4375 (inf) mem 14543MB +[2023-10-14 00:02:13 simmim_pretrain](main_simmim.py 218): INFO Train: 
[195/200][3500/6787] eta 0:14:04 lr 0.000200 time 0.2517 (0.2568) loss 0.3614 (0.3562) grad_norm 301729.7500 (inf) mem 14543MB +[2023-10-14 00:04:23 simmim_pretrain](main_simmim.py 218): INFO Train: [195/200][4000/6787] eta 0:11:56 lr 0.000200 time 0.2609 (0.2572) loss 0.3640 (0.3561) grad_norm 209223.8438 (inf) mem 14543MB +[2023-10-14 00:06:33 simmim_pretrain](main_simmim.py 218): INFO Train: [195/200][4500/6787] eta 0:09:48 lr 0.000200 time 0.2608 (0.2575) loss 0.3540 (0.3561) grad_norm 596011.0625 (inf) mem 14543MB +[2023-10-14 00:08:43 simmim_pretrain](main_simmim.py 218): INFO Train: [195/200][5000/6787] eta 0:07:40 lr 0.000200 time 0.2611 (0.2578) loss 0.3551 (0.3560) grad_norm 545414.4375 (inf) mem 14543MB +[2023-10-14 00:10:53 simmim_pretrain](main_simmim.py 218): INFO Train: [195/200][5500/6787] eta 0:05:32 lr 0.000200 time 0.2615 (0.2580) loss 0.3380 (0.3559) grad_norm 416826.4062 (inf) mem 14543MB +[2023-10-14 00:13:03 simmim_pretrain](main_simmim.py 218): INFO Train: [195/200][6000/6787] eta 0:03:23 lr 0.000200 time 0.2591 (0.2581) loss 0.3781 (0.3558) grad_norm 364557.1875 (inf) mem 14543MB +[2023-10-14 00:15:13 simmim_pretrain](main_simmim.py 218): INFO Train: [195/200][6500/6787] eta 0:01:14 lr 0.000200 time 0.2579 (0.2583) loss 0.3529 (0.3559) grad_norm 206967.9531 (inf) mem 14543MB +[2023-10-14 00:16:28 simmim_pretrain](main_simmim.py 228): INFO EPOCH 195 training takes 0:29:14 +[2023-10-14 00:16:29 simmim_pretrain](main_simmim.py 218): INFO Train: [196/200][0/6787] eta 2:50:55 lr 0.000200 time 1.5110 (1.5110) loss 0.3571 (0.3571) grad_norm 364913.9062 (364913.9062) mem 14543MB +[2023-10-14 00:18:35 simmim_pretrain](main_simmim.py 218): INFO Train: [196/200][500/6787] eta 0:26:27 lr 0.000200 time 0.2475 (0.2525) loss 0.3715 (0.3576) grad_norm 316566.9688 (248606.1875) mem 14543MB +[2023-10-14 00:20:40 simmim_pretrain](main_simmim.py 218): INFO Train: [196/200][1000/6787] eta 0:24:18 lr 0.000200 time 0.2529 (0.2520) loss 0.3543 (0.3572) grad_norm 158194.5000 (245907.6250) mem 14543MB +[2023-10-14 00:22:46 simmim_pretrain](main_simmim.py 218): INFO Train: [196/200][1500/6787] eta 0:22:12 lr 0.000200 time 0.2578 (0.2521) loss 0.3476 (0.3565) grad_norm 320833.1250 (268958.8438) mem 14543MB +[2023-10-14 00:24:54 simmim_pretrain](main_simmim.py 218): INFO Train: [196/200][2000/6787] eta 0:20:10 lr 0.000200 time 0.2567 (0.2529) loss 0.3673 (0.3561) grad_norm 323179.0938 (nan) mem 14543MB +[2023-10-14 00:27:02 simmim_pretrain](main_simmim.py 218): INFO Train: [196/200][2500/6787] eta 0:18:07 lr 0.000200 time 0.2552 (0.2536) loss 0.3812 (0.3567) grad_norm 241881.3281 (nan) mem 14543MB +[2023-10-14 00:29:11 simmim_pretrain](main_simmim.py 218): INFO Train: [196/200][3000/6787] eta 0:16:02 lr 0.000200 time 0.2585 (0.2542) loss 0.3568 (0.3573) grad_norm 111176.3828 (nan) mem 14543MB +[2023-10-14 00:31:20 simmim_pretrain](main_simmim.py 218): INFO Train: [196/200][3500/6787] eta 0:13:57 lr 0.000200 time 0.2568 (0.2547) loss 0.3646 (0.3576) grad_norm 116309.4922 (nan) mem 14543MB +[2023-10-14 00:33:29 simmim_pretrain](main_simmim.py 218): INFO Train: [196/200][4000/6787] eta 0:11:50 lr 0.000200 time 0.2616 (0.2551) loss 0.3513 (0.3579) grad_norm 99959.5625 (nan) mem 14543MB +[2023-10-14 00:35:37 simmim_pretrain](main_simmim.py 218): INFO Train: [196/200][4500/6787] eta 0:09:43 lr 0.000200 time 0.2496 (0.2553) loss 0.3656 (0.3581) grad_norm 207295.3438 (nan) mem 14543MB +[2023-10-14 00:37:46 simmim_pretrain](main_simmim.py 218): INFO Train: [196/200][5000/6787] eta 0:07:36 lr 0.000200 
time 0.2599 (0.2555) loss 0.3639 (0.3582) grad_norm 158706.2656 (nan) mem 14543MB +[2023-10-14 00:39:54 simmim_pretrain](main_simmim.py 218): INFO Train: [196/200][5500/6787] eta 0:05:28 lr 0.000200 time 0.2604 (0.2556) loss 0.3473 (0.3583) grad_norm 168193.1250 (nan) mem 14543MB +[2023-10-14 00:42:02 simmim_pretrain](main_simmim.py 218): INFO Train: [196/200][6000/6787] eta 0:03:21 lr 0.000200 time 0.2597 (0.2557) loss 0.3799 (0.3584) grad_norm 149597.2656 (nan) mem 14543MB +[2023-10-14 00:44:10 simmim_pretrain](main_simmim.py 218): INFO Train: [196/200][6500/6787] eta 0:01:13 lr 0.000200 time 0.2588 (0.2557) loss 0.3681 (0.3584) grad_norm 88202.3984 (nan) mem 14543MB +[2023-10-14 00:45:24 simmim_pretrain](main_simmim.py 228): INFO EPOCH 196 training takes 0:28:56 +[2023-10-14 00:45:25 simmim_pretrain](main_simmim.py 218): INFO Train: [197/200][0/6787] eta 2:38:22 lr 0.000200 time 1.4001 (1.4001) loss 0.3577 (0.3577) grad_norm 115283.4844 (115283.4844) mem 14543MB +[2023-10-14 00:47:32 simmim_pretrain](main_simmim.py 218): INFO Train: [197/200][500/6787] eta 0:26:41 lr 0.000200 time 0.2516 (0.2547) loss 0.3674 (0.3577) grad_norm 270207.6875 (156907.6094) mem 14543MB +[2023-10-14 00:49:38 simmim_pretrain](main_simmim.py 218): INFO Train: [197/200][1000/6787] eta 0:24:25 lr 0.000200 time 0.2525 (0.2533) loss 0.3573 (0.3577) grad_norm 345715.2500 (170754.7500) mem 14543MB +[2023-10-14 00:51:43 simmim_pretrain](main_simmim.py 218): INFO Train: [197/200][1500/6787] eta 0:22:14 lr 0.000200 time 0.2466 (0.2524) loss 0.3511 (0.3572) grad_norm 145732.6094 (180641.3438) mem 14543MB +[2023-10-14 00:53:48 simmim_pretrain](main_simmim.py 218): INFO Train: [197/200][2000/6787] eta 0:20:06 lr 0.000200 time 0.2483 (0.2520) loss 0.3650 (0.3571) grad_norm 194416.2344 (191247.9375) mem 14543MB +[2023-10-14 00:55:53 simmim_pretrain](main_simmim.py 218): INFO Train: [197/200][2500/6787] eta 0:17:58 lr 0.000200 time 0.2588 (0.2517) loss 0.3533 (0.3570) grad_norm 440575.4062 (216641.0312) mem 14543MB +[2023-10-14 00:57:59 simmim_pretrain](main_simmim.py 218): INFO Train: [197/200][3000/6787] eta 0:15:52 lr 0.000200 time 0.2589 (0.2515) loss 0.3496 (0.3568) grad_norm 375312.9688 (234518.9531) mem 14543MB +[2023-10-14 01:00:04 simmim_pretrain](main_simmim.py 218): INFO Train: [197/200][3500/6787] eta 0:13:45 lr 0.000200 time 0.2517 (0.2513) loss 0.3484 (0.3565) grad_norm 427771.4688 (252147.2969) mem 14543MB +[2023-10-14 01:02:09 simmim_pretrain](main_simmim.py 218): INFO Train: [197/200][4000/6787] eta 0:11:39 lr 0.000200 time 0.2457 (0.2511) loss 0.3504 (0.3564) grad_norm 629555.3750 (272102.0000) mem 14543MB +[2023-10-14 01:04:14 simmim_pretrain](main_simmim.py 218): INFO Train: [197/200][4500/6787] eta 0:09:34 lr 0.000200 time 0.2477 (0.2510) loss 0.3545 (0.3562) grad_norm 263213.4688 (inf) mem 14543MB +[2023-10-14 01:06:19 simmim_pretrain](main_simmim.py 218): INFO Train: [197/200][5000/6787] eta 0:07:28 lr 0.000200 time 0.2599 (0.2509) loss 0.3531 (0.3561) grad_norm 403320.0625 (inf) mem 14543MB +[2023-10-14 01:08:24 simmim_pretrain](main_simmim.py 218): INFO Train: [197/200][5500/6787] eta 0:05:22 lr 0.000200 time 0.2526 (0.2509) loss 0.3437 (0.3561) grad_norm 232945.0156 (inf) mem 14543MB +[2023-10-14 01:10:30 simmim_pretrain](main_simmim.py 218): INFO Train: [197/200][6000/6787] eta 0:03:17 lr 0.000200 time 0.2561 (0.2510) loss 0.3618 (0.3562) grad_norm 292770.2500 (inf) mem 14543MB +[2023-10-14 01:12:36 simmim_pretrain](main_simmim.py 218): INFO Train: [197/200][6500/6787] eta 0:01:12 lr 0.000200 time 
0.2545 (0.2510) loss 0.3767 (0.3564) grad_norm 321448.1250 (inf) mem 14543MB +[2023-10-14 01:13:49 simmim_pretrain](main_simmim.py 228): INFO EPOCH 197 training takes 0:28:24 +[2023-10-14 01:13:50 simmim_pretrain](main_simmim.py 218): INFO Train: [198/200][0/6787] eta 2:44:17 lr 0.000200 time 1.4524 (1.4524) loss 0.3634 (0.3634) grad_norm 302900.5000 (302900.5000) mem 14543MB +[2023-10-14 01:15:56 simmim_pretrain](main_simmim.py 218): INFO Train: [198/200][500/6787] eta 0:26:39 lr 0.000200 time 0.2513 (0.2544) loss 0.3636 (0.3584) grad_norm 142350.5781 (inf) mem 14543MB +[2023-10-14 01:18:03 simmim_pretrain](main_simmim.py 218): INFO Train: [198/200][1000/6787] eta 0:24:27 lr 0.000200 time 0.2539 (0.2536) loss 0.3589 (0.3592) grad_norm 130047.0938 (inf) mem 14543MB +[2023-10-14 01:20:09 simmim_pretrain](main_simmim.py 218): INFO Train: [198/200][1500/6787] eta 0:22:20 lr 0.000200 time 0.2552 (0.2535) loss 0.3624 (0.3596) grad_norm 149347.1875 (inf) mem 14543MB +[2023-10-14 01:22:17 simmim_pretrain](main_simmim.py 218): INFO Train: [198/200][2000/6787] eta 0:20:15 lr 0.000200 time 0.2616 (0.2538) loss 0.3522 (0.3607) grad_norm 79951.1406 (inf) mem 14543MB +[2023-10-14 01:24:27 simmim_pretrain](main_simmim.py 218): INFO Train: [198/200][2500/6787] eta 0:18:13 lr 0.000200 time 0.2607 (0.2550) loss 0.3635 (0.3611) grad_norm 51467.3008 (inf) mem 14543MB +[2023-10-14 01:26:37 simmim_pretrain](main_simmim.py 218): INFO Train: [198/200][3000/6787] eta 0:16:09 lr 0.000200 time 0.2605 (0.2559) loss 0.3345 (0.3611) grad_norm 24979.0566 (inf) mem 14543MB +[2023-10-14 01:28:47 simmim_pretrain](main_simmim.py 218): INFO Train: [198/200][3500/6787] eta 0:14:02 lr 0.000200 time 0.2604 (0.2565) loss 0.3515 (0.3613) grad_norm 68689.3516 (inf) mem 14543MB +[2023-10-14 01:30:57 simmim_pretrain](main_simmim.py 218): INFO Train: [198/200][4000/6787] eta 0:11:55 lr 0.000200 time 0.2608 (0.2569) loss 0.3564 (0.3611) grad_norm 133474.7812 (inf) mem 14543MB +[2023-10-14 01:33:07 simmim_pretrain](main_simmim.py 218): INFO Train: [198/200][4500/6787] eta 0:09:48 lr 0.000200 time 0.2603 (0.2572) loss 0.3701 (0.3609) grad_norm 66461.5703 (inf) mem 14543MB +[2023-10-14 01:35:17 simmim_pretrain](main_simmim.py 218): INFO Train: [198/200][5000/6787] eta 0:07:40 lr 0.000200 time 0.2607 (0.2575) loss 0.3963 (0.3606) grad_norm 152168.8750 (inf) mem 14543MB +[2023-10-14 01:37:26 simmim_pretrain](main_simmim.py 218): INFO Train: [198/200][5500/6787] eta 0:05:31 lr 0.000200 time 0.2612 (0.2577) loss 0.3558 (0.3605) grad_norm 103547.5781 (inf) mem 14543MB +[2023-10-14 01:39:36 simmim_pretrain](main_simmim.py 218): INFO Train: [198/200][6000/6787] eta 0:03:22 lr 0.000200 time 0.2605 (0.2579) loss 0.3301 (0.3603) grad_norm 133191.2656 (inf) mem 14543MB +[2023-10-14 01:41:46 simmim_pretrain](main_simmim.py 218): INFO Train: [198/200][6500/6787] eta 0:01:14 lr 0.000200 time 0.2598 (0.2580) loss 0.3534 (0.3600) grad_norm 236418.2812 (inf) mem 14543MB +[2023-10-14 01:43:01 simmim_pretrain](main_simmim.py 228): INFO EPOCH 198 training takes 0:29:12 +[2023-10-14 01:43:03 simmim_pretrain](main_simmim.py 218): INFO Train: [199/200][0/6787] eta 3:05:32 lr 0.000200 time 1.6403 (1.6403) loss 0.3670 (0.3670) grad_norm 124683.3828 (124683.3828) mem 14543MB +[2023-10-14 01:45:08 simmim_pretrain](main_simmim.py 218): INFO Train: [199/200][500/6787] eta 0:26:26 lr 0.000200 time 0.2516 (0.2524) loss 0.3567 (0.3573) grad_norm 139540.4375 (181335.5781) mem 14543MB +[2023-10-14 01:47:13 simmim_pretrain](main_simmim.py 218): INFO Train: 
[199/200][1000/6787] eta 0:24:15 lr 0.000200 time 0.2451 (0.2514) loss 0.3620 (0.3570) grad_norm 190056.2344 (182440.2812) mem 14543MB +[2023-10-14 01:49:18 simmim_pretrain](main_simmim.py 218): INFO Train: [199/200][1500/6787] eta 0:22:07 lr 0.000200 time 0.2587 (0.2511) loss 0.3333 (0.3563) grad_norm 268888.2812 (202905.9062) mem 14543MB +[2023-10-14 01:51:24 simmim_pretrain](main_simmim.py 218): INFO Train: [199/200][2000/6787] eta 0:20:02 lr 0.000200 time 0.2496 (0.2511) loss 0.3661 (0.3561) grad_norm 511422.1250 (223033.3906) mem 14543MB +[2023-10-14 01:53:29 simmim_pretrain](main_simmim.py 218): INFO Train: [199/200][2500/6787] eta 0:17:56 lr 0.000200 time 0.2463 (0.2511) loss 0.3614 (0.3558) grad_norm 254368.5312 (243853.5938) mem 14543MB +[2023-10-14 01:55:35 simmim_pretrain](main_simmim.py 218): INFO Train: [199/200][3000/6787] eta 0:15:51 lr 0.000200 time 0.2571 (0.2512) loss 0.3416 (0.3557) grad_norm 422309.1250 (264830.0312) mem 14543MB +[2023-10-14 01:57:41 simmim_pretrain](main_simmim.py 218): INFO Train: [199/200][3500/6787] eta 0:13:46 lr 0.000200 time 0.2486 (0.2514) loss 0.3685 (0.3556) grad_norm 418466.9688 (inf) mem 14543MB +[2023-10-14 01:59:47 simmim_pretrain](main_simmim.py 218): INFO Train: [199/200][4000/6787] eta 0:11:40 lr 0.000200 time 0.2596 (0.2515) loss 0.3498 (0.3556) grad_norm 545823.3750 (inf) mem 14543MB +[2023-10-14 02:01:54 simmim_pretrain](main_simmim.py 218): INFO Train: [199/200][4500/6787] eta 0:09:35 lr 0.000200 time 0.2592 (0.2516) loss 0.3615 (0.3554) grad_norm 556156.5625 (inf) mem 14543MB +[2023-10-14 02:04:00 simmim_pretrain](main_simmim.py 218): INFO Train: [199/200][5000/6787] eta 0:07:29 lr 0.000200 time 0.2543 (0.2517) loss 0.3322 (0.3553) grad_norm 203762.3125 (nan) mem 14543MB +[2023-10-14 02:06:06 simmim_pretrain](main_simmim.py 218): INFO Train: [199/200][5500/6787] eta 0:05:24 lr 0.000200 time 0.2561 (0.2518) loss 0.3517 (0.3554) grad_norm 353202.0312 (nan) mem 14543MB +[2023-10-14 02:08:15 simmim_pretrain](main_simmim.py 218): INFO Train: [199/200][6000/6787] eta 0:03:18 lr 0.000200 time 0.2598 (0.2522) loss 0.3787 (0.3556) grad_norm 189523.1719 (nan) mem 14543MB +[2023-10-14 02:10:24 simmim_pretrain](main_simmim.py 218): INFO Train: [199/200][6500/6787] eta 0:01:12 lr 0.000200 time 0.2597 (0.2528) loss 0.3604 (0.3558) grad_norm 114084.1875 (nan) mem 14543MB +[2023-10-14 02:11:39 simmim_pretrain](main_simmim.py 228): INFO EPOCH 199 training takes 0:28:38 +[2023-10-14 02:11:39 simmim_pretrain](utils.py 62): INFO /root/autodl-tmp/LSQ-simmim/checkpoint/simmim_pretrain/vit_simmim/ckpt_epoch_199.pth saving...... +[2023-10-14 02:11:40 simmim_pretrain](utils.py 64): INFO /root/autodl-tmp/LSQ-simmim/checkpoint/simmim_pretrain/vit_simmim/ckpt_epoch_199.pth saved !!! 
+[2023-10-14 02:11:40 simmim_pretrain](main_simmim.py 156): INFO Training time 3 days, 23:07:37
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/optimizer.py b/PuzzleTuning/Counterpart PreTrain Methods/simmim/optimizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..45bbeaed4927cf86b06619ca435a53db9526af07
--- /dev/null
+++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/optimizer.py
@@ -0,0 +1,191 @@
+# --------------------------------------------------------
+# SimMIM
+# Copyright (c) 2021 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+# Written by Ze Liu
+# Modified by Zhenda Xie
+# --------------------------------------------------------
+
+import json
+from functools import partial
+from torch import optim as optim
+
+
+def build_optimizer(config, model, logger, is_pretrain):
+    if is_pretrain:
+        return build_pretrain_optimizer(config, model, logger)
+    else:
+        return build_finetune_optimizer(config, model, logger)
+
+
+def build_pretrain_optimizer(config, model, logger):
+    logger.info('>>>>>>>>>> Build Optimizer for Pre-training Stage')
+    skip = {}
+    skip_keywords = {}
+    if hasattr(model, 'no_weight_decay'):
+        skip = model.no_weight_decay()
+        logger.info(f'No weight decay: {skip}')
+    if hasattr(model, 'no_weight_decay_keywords'):
+        skip_keywords = model.no_weight_decay_keywords()
+        logger.info(f'No weight decay keywords: {skip_keywords}')
+
+    parameters = get_pretrain_param_groups(model, logger, skip, skip_keywords)
+
+    opt_lower = config.TRAIN.OPTIMIZER.NAME.lower()
+    optimizer = None
+    if opt_lower == 'sgd':
+        optimizer = optim.SGD(parameters, momentum=config.TRAIN.OPTIMIZER.MOMENTUM, nesterov=True,
+                              lr=config.TRAIN.BASE_LR, weight_decay=config.TRAIN.WEIGHT_DECAY)
+    elif opt_lower == 'adamw':
+        optimizer = optim.AdamW(parameters, eps=config.TRAIN.OPTIMIZER.EPS, betas=config.TRAIN.OPTIMIZER.BETAS,
+                                lr=config.TRAIN.BASE_LR, weight_decay=config.TRAIN.WEIGHT_DECAY)
+
+    logger.info(optimizer)
+    return optimizer
+
+
+def get_pretrain_param_groups(model, logger, skip_list=(), skip_keywords=()):
+    has_decay = []
+    no_decay = []
+    has_decay_name = []
+    no_decay_name = []
+
+    for name, param in model.named_parameters():
+        if not param.requires_grad:
+            continue
+        if len(param.shape) == 1 or name.endswith(".bias") or (name in skip_list) or \
+                check_keywords_in_name(name, skip_keywords):
+            no_decay.append(param)
+            no_decay_name.append(name)
+        else:
+            has_decay.append(param)
+            has_decay_name.append(name)
+    logger.info(f'No decay params: {no_decay_name}')
+    logger.info(f'Has decay params: {has_decay_name}')
+    return [{'params': has_decay},
+            {'params': no_decay, 'weight_decay': 0.}]
+
+
+def build_finetune_optimizer(config, model, logger):
+    logger.info('>>>>>>>>>> Build Optimizer for Fine-tuning Stage')
+    if config.MODEL.TYPE == 'swin':
+        depths = config.MODEL.SWIN.DEPTHS
+        num_layers = sum(depths)
+        get_layer_func = partial(get_swin_layer, num_layers=num_layers + 2, depths=depths)
+    elif config.MODEL.TYPE == 'vit':
+        num_layers = config.MODEL.VIT.DEPTH
+        get_layer_func = partial(get_vit_layer, num_layers=num_layers + 2)
+    else:
+        raise NotImplementedError
+
+    scales = list(config.TRAIN.LAYER_DECAY ** i for i in reversed(range(num_layers + 2)))
+
+    skip = {}
+    skip_keywords = {}
+    if hasattr(model, 'no_weight_decay'):
+        skip = model.no_weight_decay()
+        logger.info(f'No weight decay: {skip}')
+    if hasattr(model, 'no_weight_decay_keywords'):
+        skip_keywords = model.no_weight_decay_keywords()
+        logger.info(f'No weight decay keywords: {skip_keywords}')
+
+    parameters = get_finetune_param_groups(
+        model, logger, config.TRAIN.BASE_LR, config.TRAIN.WEIGHT_DECAY,
+        get_layer_func, scales, skip, skip_keywords)
+
+    opt_lower = config.TRAIN.OPTIMIZER.NAME.lower()
+    optimizer = None
+    if opt_lower == 'sgd':
+        optimizer = optim.SGD(parameters, momentum=config.TRAIN.OPTIMIZER.MOMENTUM, nesterov=True,
+                              lr=config.TRAIN.BASE_LR, weight_decay=config.TRAIN.WEIGHT_DECAY)
+    elif opt_lower == 'adamw':
+        optimizer = optim.AdamW(parameters, eps=config.TRAIN.OPTIMIZER.EPS, betas=config.TRAIN.OPTIMIZER.BETAS,
+                                lr=config.TRAIN.BASE_LR, weight_decay=config.TRAIN.WEIGHT_DECAY)
+
+    logger.info(optimizer)
+    return optimizer
+
+
+def get_vit_layer(name, num_layers):
+    if name in ("cls_token", "mask_token", "pos_embed"):
+        return 0
+    elif name.startswith("patch_embed"):
+        return 0
+    elif name.startswith("rel_pos_bias"):
+        return num_layers - 1
+    elif name.startswith("blocks"):
+        layer_id = int(name.split('.')[1])
+        return layer_id + 1
+    else:
+        return num_layers - 1
+
+
+def get_swin_layer(name, num_layers, depths):
+    if name in ("mask_token",):  # one-element tuple; a bare ("mask_token") is just a string, so `in` would test substrings
+        return 0
+    elif name.startswith("patch_embed"):
+        return 0
+    elif name.startswith("layers"):
+        layer_id = int(name.split('.')[1])
+        block_id = name.split('.')[3]
+        if block_id == 'reduction' or block_id == 'norm':
+            return sum(depths[:layer_id + 1])
+        layer_id = sum(depths[:layer_id]) + int(block_id)
+        return layer_id + 1
+    else:
+        return num_layers - 1
+
+
+def get_finetune_param_groups(model, logger, lr, weight_decay, get_layer_func, scales, skip_list=(), skip_keywords=()):
+    parameter_group_names = {}
+    parameter_group_vars = {}
+
+    for name, param in model.named_parameters():
+        if not param.requires_grad:
+            continue
+        if len(param.shape) == 1 or name.endswith(".bias") or (name in skip_list) or \
+                check_keywords_in_name(name, skip_keywords):
+            group_name = "no_decay"
+            this_weight_decay = 0.
+        else:
+            group_name = "decay"
+            this_weight_decay = weight_decay
+        if get_layer_func is not None:
+            layer_id = get_layer_func(name)
+            group_name = "layer_%d_%s" % (layer_id, group_name)
+        else:
+            layer_id = None
+
+        if group_name not in parameter_group_names:
+            if scales is not None:
+                scale = scales[layer_id]
+            else:
+                scale = 1.
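+            # Worked example (hypothetical numbers, not taken from this repo's configs): for a
+            # 12-block ViT, scales has num_layers + 2 = 14 entries, [LAYER_DECAY**13, ..., LAYER_DECAY, 1.0].
+            # With LAYER_DECAY = 0.65, the patch embedding (layer_id 0) trains at lr * 0.65**13 ~= 0.0037 * lr,
+            # encoder block i at lr * 0.65**(12 - i), and the head (layer_id 13) at the full base lr.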
+
+            parameter_group_names[group_name] = {
+                "group_name": group_name,
+                "weight_decay": this_weight_decay,
+                "params": [],
+                "lr": lr * scale,
+                "lr_scale": scale,
+            }
+            parameter_group_vars[group_name] = {
+                "group_name": group_name,
+                "weight_decay": this_weight_decay,
+                "params": [],
+                "lr": lr * scale,
+                "lr_scale": scale
+            }
+
+        parameter_group_vars[group_name]["params"].append(param)
+        parameter_group_names[group_name]["params"].append(name)
+    logger.info("Param groups = %s" % json.dumps(parameter_group_names, indent=2))
+    return list(parameter_group_vars.values())
+
+
+def check_keywords_in_name(name, keywords=()):
+    return any(keyword in name for keyword in keywords)
\ No newline at end of file
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/pretrain.sh b/PuzzleTuning/Counterpart PreTrain Methods/simmim/pretrain.sh
new file mode 100644
index 0000000000000000000000000000000000000000..b098d5ea6c919fce5a610025170b65f6a48966c5
--- /dev/null
+++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/pretrain.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+# ps -ef | grep simmim | awk '{print $2}' |xargs kill
+
+# Training settings
+pretrain_model="timm"
+dataset="All"
+model_weights="/root/autodl-tmp/model_base/ViT_b16_224_Imagenet.pth"
+
+# Init params
+epochs=10
+data_path="/root/autodl-tmp/datasets/${dataset}"
+model_name="ViT_b16_224_${pretrain_model}_sdmae_${dataset}_${epochs}"
+checkpoint_path="/root/autodl-tmp/LSQ-simmim/checkpoint/"
+save_weight_path="/root/autodl-tmp/LSQ-simmim/model_saved/"
+tensorboard_path="/root/tf-logs/"
+
+
+# Training. Save checkpoint every 10 epochs.
+# The checkpoint and backbone model will be available under checkpoint_path folder.
+set -e
+
+CUDA_VISIBLE_DEVICES=0,1,2,3 \
+python -u -m torch.distributed.launch \
+    --nproc_per_node 4 \
+    main_simmim.py \
+    --tag vit_simmim \
+    --cfg ./configs/vit_base__test/simmim_pretrain__vit_base__img224__100ep.yaml \
+    --batch-size 128 \
+    --data-path $data_path \
+    --output $checkpoint_path \
+    --log_dir $tensorboard_path \
+    --amp-opt-level O1 \
+    --load-weight $model_weights
+
+python load_vit_from_ckpt.py \
+    --checkpoint /root/autodl-tmp/LSQ-simmim/B/checkpoint/simmim_pretrain/vit_simmim/ckpt_epoch_199.pth \
+    --save-to ./output/ \
+    --save-name "ViT_b16_224_timm_SIMMIM_ALL_200.pth" \
+    --basic-weight $model_weights \
+    --num-classes 2
+
+set +e
\ No newline at end of file
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/requirements.txt b/PuzzleTuning/Counterpart PreTrain Methods/simmim/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..16ebf2cdf2af6b8ae1d6cf4ed81ace18978bfda3
--- /dev/null
+++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/requirements.txt
@@ -0,0 +1,5 @@
+pyyaml
+scipy
+termcolor
+timm
+yacs
\ No newline at end of file
diff --git a/PuzzleTuning/Counterpart PreTrain Methods/simmim/utils.py b/PuzzleTuning/Counterpart PreTrain Methods/simmim/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b902fb75b720c78be6346553d1dbaffa4bf856d
--- /dev/null
+++ b/PuzzleTuning/Counterpart PreTrain Methods/simmim/utils.py
@@ -0,0 +1,286 @@
+# --------------------------------------------------------
+# SimMIM
+# Copyright (c) 2021 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+# Written by Ze Liu
+# Modified by Zhenda Xie
+# --------------------------------------------------------
+
+import os
+import torch
+import torch.distributed as dist
+import numpy as np
+from scipy import interpolate
+
+# try:
+#     # noinspection PyUnresolvedReferences
+#     # from apex import amp
+#     import torch.cuda.amp as amp
+#     from torch.cuda.amp import autocast as autocast
+# except ImportError:
+#     amp = None
+
+
+def load_checkpoint(config, model, optimizer, lr_scheduler, logger):
+    logger.info(f">>>>>>>>>> Resuming from {config.MODEL.RESUME} ..........")
+    if config.MODEL.RESUME.startswith('https'):
+        checkpoint = torch.hub.load_state_dict_from_url(
+            config.MODEL.RESUME, map_location='cpu', check_hash=True)
+    else:
+        checkpoint = torch.load(config.MODEL.RESUME, map_location='cpu')
+    msg = model.load_state_dict(checkpoint['model'], strict=False)
+    logger.info(msg)
+    max_accuracy = 0.0
+    if not config.EVAL_MODE and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
+        optimizer.load_state_dict(checkpoint['optimizer'])
+        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
+        config.defrost()
+        config.TRAIN.START_EPOCH = checkpoint['epoch'] + 1
+        config.freeze()
+        # if 'amp' in checkpoint and config.AMP_OPT_LEVEL != "O0" and checkpoint['config'].AMP_OPT_LEVEL != "O0":
+        #     amp.load_state_dict(checkpoint['amp'])
+        logger.info(f"=> loaded successfully '{config.MODEL.RESUME}' (epoch {checkpoint['epoch']})")
+        if 'max_accuracy' in checkpoint:
+            max_accuracy = checkpoint['max_accuracy']
+
+    del checkpoint
+    torch.cuda.empty_cache()
+    return max_accuracy
+
+
+def save_checkpoint(config, epoch, model, max_accuracy, optimizer, lr_scheduler, logger):
+    save_state = {'model': model.state_dict(),
+                  'optimizer': optimizer.state_dict(),
+                  'lr_scheduler': lr_scheduler.state_dict(),
+                  'max_accuracy': max_accuracy,
+                  'epoch': epoch,
+                  'config': config}
+    # if config.AMP_OPT_LEVEL != "O0":
+    #     save_state['amp'] = amp.state_dict()
+
+    save_path = os.path.join(config.OUTPUT, f'ckpt_epoch_{epoch}.pth')
+    logger.info(f"{save_path} saving......")
+    torch.save(save_state, save_path)
+    logger.info(f"{save_path} saved !!!")
+
+
+def get_grad_norm(parameters, norm_type=2):
+    # Computes the total gradient norm like torch.nn.utils.clip_grad_norm_, but without clipping.
+    # If any gradient holds inf/nan (e.g. an overflowing fp16 step under amp), the result is inf/nan,
+    # which then sticks in the epoch's running average - cf. the (inf)/(nan) values in the training log above.
+    if isinstance(parameters, torch.Tensor):
+        parameters = [parameters]
+    parameters = list(filter(lambda p: p.grad is not None, parameters))
+    norm_type = float(norm_type)
+    total_norm = 0
+    for p in parameters:
+        param_norm = p.grad.data.norm(norm_type)
+        total_norm += param_norm.item() ** norm_type
+    total_norm = total_norm ** (1. / norm_type)
+    return total_norm
+
+
+def auto_resume_helper(output_dir, logger):
+    checkpoints = os.listdir(output_dir)
+    checkpoints = [ckpt for ckpt in checkpoints if ckpt.endswith('pth')]
+    logger.info(f"All checkpoints found in {output_dir}: {checkpoints}")
+    if len(checkpoints) > 0:
+        latest_checkpoint = max([os.path.join(output_dir, d) for d in checkpoints], key=os.path.getmtime)
+        logger.info(f"The latest checkpoint found: {latest_checkpoint}")
+        resume_file = latest_checkpoint
+    else:
+        resume_file = None
+    return resume_file
+
+
+def reduce_tensor(tensor):
+    rt = tensor.clone()
+    dist.all_reduce(rt, op=dist.ReduceOp.SUM)
+    rt /= dist.get_world_size()
+    return rt
+
+
+def load_pretrained(config, model, logger):
+    logger.info(f">>>>>>>>>> Fine-tuned from {config.PRETRAINED} ..........")
+    checkpoint = torch.load(config.PRETRAINED, map_location='cpu')
+    checkpoint_model = checkpoint['model']
+
+    if any('encoder.' in k for k in checkpoint_model.keys()):
+        checkpoint_model = {k.replace('encoder.', ''): v for k, v in checkpoint_model.items() if k.startswith('encoder.')}
+        logger.info('Detect pre-trained model, remove [encoder.] prefix.')
prefix.') + else: + logger.info('Detect non-pre-trained model, pass without doing anything.') + + if config.MODEL.TYPE == 'swin': + logger.info(f">>>>>>>>>> Remapping pre-trained keys for SWIN ..........") + checkpoint = remap_pretrained_keys_swin(model, checkpoint_model, logger) + elif config.MODEL.TYPE == 'vit': + logger.info(f">>>>>>>>>> Remapping pre-trained keys for VIT ..........") + checkpoint = remap_pretrained_keys_vit(model, checkpoint_model, logger) + else: + raise NotImplementedError + + msg = model.load_state_dict(checkpoint_model, strict=False) + logger.info(msg) + + del checkpoint + torch.cuda.empty_cache() + logger.info(f">>>>>>>>>> loaded successfully '{config.PRETRAINED}'") + + +def remap_pretrained_keys_swin(model, checkpoint_model, logger): + state_dict = model.state_dict() + + # Geometric interpolation when the pre-trained patch size mismatches the fine-tuned patch size + all_keys = list(checkpoint_model.keys()) + for key in all_keys: + if "relative_position_bias_table" in key: + relative_position_bias_table_pretrained = checkpoint_model[key] + relative_position_bias_table_current = state_dict[key] + L1, nH1 = relative_position_bias_table_pretrained.size() + L2, nH2 = relative_position_bias_table_current.size() + if nH1 != nH2: + logger.info(f"Error in loading {key}, passing......") + else: + if L1 != L2: + logger.info(f"{key}: Interpolate relative_position_bias_table using geo.") + src_size = int(L1 ** 0.5) + dst_size = int(L2 ** 0.5) + + def geometric_progression(a, r, n): + return a * (1.0 - r ** n) / (1.0 - r) + + left, right = 1.01, 1.5 + while right - left > 1e-6: + q = (left + right) / 2.0 + gp = geometric_progression(1, q, src_size // 2) + if gp > dst_size // 2: + right = q + else: + left = q + + # if q > 1.090307: + # q = 1.090307 + + dis = [] + cur = 1 + for i in range(src_size // 2): + dis.append(cur) + cur += q ** (i + 1) + + r_ids = [-_ for _ in reversed(dis)] + + x = r_ids + [0] + dis + y = r_ids + [0] + dis + + t = dst_size // 2.0 + dx = np.arange(-t, t + 0.1, 1.0) + dy = np.arange(-t, t + 0.1, 1.0) + + logger.info("Original positions = %s" % str(x)) + logger.info("Target positions = %s" % str(dx)) + + all_rel_pos_bias = [] + + for i in range(nH1): + z = relative_position_bias_table_pretrained[:, i].view(src_size, src_size).float().numpy() + f_cubic = interpolate.interp2d(x, y, z, kind='cubic') + all_rel_pos_bias.append(torch.Tensor(f_cubic(dx, dy)).contiguous().view(-1, 1).to( + relative_position_bias_table_pretrained.device)) + + new_rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1) + checkpoint_model[key] = new_rel_pos_bias + + # delete relative_position_index since we always re-init it + relative_position_index_keys = [k for k in checkpoint_model.keys() if "relative_position_index" in k] + for k in relative_position_index_keys: + del checkpoint_model[k] + + # delete relative_coords_table since we always re-init it + relative_coords_table_keys = [k for k in checkpoint_model.keys() if "relative_coords_table" in k] + for k in relative_coords_table_keys: + del checkpoint_model[k] + + # delete attn_mask since we always re-init it + attn_mask_keys = [k for k in checkpoint_model.keys() if "attn_mask" in k] + for k in attn_mask_keys: + del checkpoint_model[k] + + return checkpoint_model + + +def remap_pretrained_keys_vit(model, checkpoint_model, logger): + # Duplicate shared rel_pos_bias to each layer + if getattr(model, 'use_rel_pos_bias', False) and "rel_pos_bias.relative_position_bias_table" in checkpoint_model: + logger.info("Expand the shared relative
position embedding to each transformer block.") + num_layers = model.get_num_layers() + rel_pos_bias = checkpoint_model["rel_pos_bias.relative_position_bias_table"] + for i in range(num_layers): + checkpoint_model["blocks.%d.attn.relative_position_bias_table" % i] = rel_pos_bias.clone() + checkpoint_model.pop("rel_pos_bias.relative_position_bias_table") + + # Geometric interpolation when the pre-trained patch size mismatches the fine-tuned patch size + all_keys = list(checkpoint_model.keys()) + for key in all_keys: + if "relative_position_index" in key: + checkpoint_model.pop(key) + + if "relative_position_bias_table" in key: + rel_pos_bias = checkpoint_model[key] + src_num_pos, num_attn_heads = rel_pos_bias.size() + dst_num_pos, _ = model.state_dict()[key].size() + dst_patch_shape = model.patch_embed.patch_shape + if dst_patch_shape[0] != dst_patch_shape[1]: + raise NotImplementedError() + num_extra_tokens = dst_num_pos - (dst_patch_shape[0] * 2 - 1) * (dst_patch_shape[1] * 2 - 1) + src_size = int((src_num_pos - num_extra_tokens) ** 0.5) + dst_size = int((dst_num_pos - num_extra_tokens) ** 0.5) + if src_size != dst_size: + logger.info("Position interpolate for %s from %dx%d to %dx%d" % (key, src_size, src_size, dst_size, dst_size)) + extra_tokens = rel_pos_bias[-num_extra_tokens:, :] + rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :] + + def geometric_progression(a, r, n): + return a * (1.0 - r ** n) / (1.0 - r) + + left, right = 1.01, 1.5 + while right - left > 1e-6: + q = (left + right) / 2.0 + gp = geometric_progression(1, q, src_size // 2) + if gp > dst_size // 2: + right = q + else: + left = q + + # if q > 1.090307: + # q = 1.090307 + + dis = [] + cur = 1 + for i in range(src_size // 2): + dis.append(cur) + cur += q ** (i + 1) + + r_ids = [-_ for _ in reversed(dis)] + + x = r_ids + [0] + dis + y = r_ids + [0] + dis + + t = dst_size // 2.0 + dx = np.arange(-t, t + 0.1, 1.0) + dy = np.arange(-t, t + 0.1, 1.0) + + logger.info("Original positions = %s" % str(x)) + logger.info("Target positions = %s" % str(dx)) + + all_rel_pos_bias = [] + + for i in range(num_attn_heads): + z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy() + f = interpolate.interp2d(x, y, z, kind='cubic') + all_rel_pos_bias.append( + torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to(rel_pos_bias.device)) + + rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1) + + new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0) + checkpoint_model[key] = new_rel_pos_bias + + return checkpoint_model \ No newline at end of file diff --git a/PuzzleTuning/Get_PuzzleTuning_model.py b/PuzzleTuning/Get_PuzzleTuning_model.py new file mode 100644 index 0000000000000000000000000000000000000000..9bb5ba6b944c483a022df9fb6640aa7f27347055 --- /dev/null +++ b/PuzzleTuning/Get_PuzzleTuning_model.py @@ -0,0 +1,25 @@ +import timm +import os +from Backbone.GetPromptModel import build_promptmodel +from pprint import pprint + + +def get_PuzzleTuning_VPT_model(num_classes=0, edge_size=224, prompt_state_dict=None, base_state_dict='timm'): + """ + :param num_classes: the number of classes required for classification on your dataset, 0 for taking the feature (CLS token) output + :param edge_size: the input edge size of the dataloader
:param prompt_state_dict: an optional state dict of pre-trained prompt tokens, None by default + + :param base_state_dict: 'timm' to take the official timm ImageNet-pretrained ViT weights, or a loaded backbone state dict + (the inner backbone is fixed to ViT-base with patch_size 16 and 20 Deep VPT prompt tokens) + + :return: prepared model + """ + + model = build_promptmodel( + num_classes=num_classes, # 0 builds a feature extractor: the output is the CLS token feature + edge_size=edge_size, model_idx='ViT', patch_size=16, + Prompt_Token_num=20, VPT_type="Deep", + prompt_state_dict=prompt_state_dict, base_state_dict=base_state_dict) + + return model diff --git a/PuzzleTuning/PuzzleTesting.py b/PuzzleTuning/PuzzleTesting.py new file mode 100644 index 0000000000000000000000000000000000000000..dde514476e469d15f8c3a4aed2ce1a39d688f3fb --- /dev/null +++ b/PuzzleTuning/PuzzleTesting.py @@ -0,0 +1,369 @@ +""" +Testing and visualization script of PuzzleTuning Script ver: Feb 11th 14:00 + +Paper: +https://arxiv.org/abs/2311.06712 +Code: +https://github.com/sagizty/PuzzleTuning +Ref: MAE +https://github.com/facebookresearch/mae + +Step 1: PreTraining on the ImageNet-1k style dataset (others) +Step 2: Domain Prompt Tuning (PuzzleTuning) on Pathological Images (in ImageFolder) +Step 3: FineTuning on the Downstream Tasks + +This is the independent testing script for step 2 + + +update: +Use the "--seg_decoder" parameter to introduce a segmentation network as decoder, +e.g. swin_unet for Swin-Unet +""" + +import argparse +import datetime +import numpy as np +import os +import time +from pathlib import Path + +import torch +import torch.backends.cudnn as cudnn +from tensorboardX import SummaryWriter +import torchvision.transforms as transforms +import torchvision.datasets as datasets + +import timm + +# assert timm.__version__ == "0.3.2" # version check + +from SSL_structures import models_mae, SAE + +from utils.visual_usage import patchify, unpatchify, Draw_tri_fig +from torchvision.transforms import ToPILImage + + +def Puzzle_test(model, data_loader_test, test_dataset_size, mask_ratio, fix_position_ratio, fix_patch_size, + check_minibatch=100, enable_visualize_check=True, combined_pred_illustration=False, check_samples=1, + device=None, output_dir=None, writer=None, args=None): + # start testing + print(f"Start testing for {args.model_idx} \n with checkpoint: {args.checkpoint_path}") + start_time = time.time() + index = 0 + model_time = time.time() + # criteria, initially empty + running_loss = 0.0 + log_running_loss = 0.0 + + model.eval() + + # Iterate over data.
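+    # A minimal sketch of the two forward contracts used below (assuming 224x224 inputs and 16x16 patches,
+    # i.e. 196 patches of 16*16*3 = 768 values each):
+    #   SAE: loss, pred, imgs_puzzled_patches = model(inputs, fix_position_ratio, puzzle_patch_size, ...),
+    #        where pred and imgs_puzzled_patches are patch sequences ready for unpatchify()
+    #   MAE: loss, pred, mask_patch_indicators = model(inputs, mask_ratio=mask_ratio),
+    #        where the indicators mark the patches that stayed visible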
+ for inputs, labels in data_loader_test: # use a different dataloader in each phase + inputs = inputs.to(device, non_blocking=True) + labels = labels.to(device, non_blocking=True) # labels are only kept for tracking (fixme) + + if args.model[0:3] == 'sae': + loss, pred, imgs_puzzled_patches = model(inputs, fix_position_ratio=fix_position_ratio, + puzzle_patch_size=fix_patch_size, + combined_pred_illustration=combined_pred_illustration) # SAE + else: # args.model[0:3] == 'mae' + loss, pred, mask_patch_indicators = model(inputs, mask_ratio=mask_ratio) # MAE + + loss_value = float(loss.cpu().detach().numpy()) if args.gpu == 1 else sum(loss.cpu().detach().numpy()) + # log criteria: update + log_running_loss += loss_value + running_loss += loss_value * inputs.size(0) + + # attach the records to the tensorboard backend + if writer is not None: + # ...log the running loss + writer.add_scalar('Test minibatch loss', + float(loss_value), + index) + + # at a periodic checking step now + if index % check_minibatch == check_minibatch - 1: + model_time = time.time() - model_time + + check_index = index // check_minibatch + 1 + + print('Test index ' + str(check_index) + ' of ' + str(check_minibatch) + ' minibatch with batch_size of ' + + str(inputs.size(0)) + ' time used:', model_time) + print('minibatch AVG loss:', float(log_running_loss) / check_minibatch) + + model_time = time.time() + log_running_loss = 0.0 + + # paint the check figures + if enable_visualize_check: + if args.model[0:3] == 'sae': + imgs_puzzled_batch = unpatchify(imgs_puzzled_patches, patch_size=16) + # Reconstructed img + recons_img_batch = unpatchify(pred, patch_size=16) + + else: # MAE + sample_img_patches = patchify(inputs, patch_size=16) # on GPU + masked_img_patches = sample_img_patches * mask_patch_indicators.unsqueeze(-1).expand(-1, -1, + sample_img_patches.shape[-1]) + masked_img_batch = unpatchify(masked_img_patches, patch_size=16) + + if combined_pred_illustration: + + anti_mask_patch_indicators = 1 - mask_patch_indicators + pred_img_patches = pred * anti_mask_patch_indicators.unsqueeze(-1).\ + expand(-1, -1, sample_img_patches.shape[-1]) + + # Reconstructed img + recons_img_batch = unpatchify(masked_img_patches + pred_img_patches, patch_size=16) + else: + # Reconstructed img + recons_img_batch = unpatchify(pred, patch_size=16) + + for sampleIDX in range(check_samples): + # Original img + sample_img = inputs.cpu()[sampleIDX] + sample_img = ToPILImage()(sample_img) + sample_img.save(os.path.join(output_dir, 'Test_sample_idx_' + str(check_index) + + '_sampleIDX_' + str(sampleIDX) + '.jpg')) + + recons_img = recons_img_batch.cpu()[sampleIDX] + recons_img = ToPILImage()(recons_img) + recons_img.save(os.path.join(output_dir, 'Test_recons_idx_' + str(check_index) + + '_sampleIDX_' + str(sampleIDX) + '.jpg')) + + # mask_img or puzzled_img + if args.model[0:3] == 'sae': + puzzled_img = imgs_puzzled_batch.cpu()[sampleIDX] + puzzled_img = ToPILImage()(puzzled_img) + puzzled_img.save(os.path.join(output_dir, 'Test_puzzled_idx_' + str(check_index) + '.jpg')) + + picpath = os.path.join(output_dir, 'Test_minibatchIDX_' + str(check_index) + + '_sampleIDX_' + str(sampleIDX) + '.jpg') + Draw_tri_fig(sample_img, puzzled_img, recons_img, picpath) + + else: # MAE + masked_img = masked_img_batch.cpu()[sampleIDX] + masked_img = ToPILImage()(masked_img) + masked_img.save(os.path.join(output_dir, 'Test_masked_idx_' + str(check_index) + '.jpg')) + + picpath = os.path.join(output_dir, 'Test_minibatchIDX_' + str(check_index) + + '_sampleIDX_' + str(sampleIDX) + '.jpg') +
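+ # Draw_tri_fig (from utils.visual_usage) is expected to tile the three views into one figure: original sample, masked or puzzled input, and reconstruction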
Draw_tri_fig(sample_img, masked_img, recons_img, picpath) + + index += 1 + + # time stamp + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + + # log criteria: print + epoch_loss = running_loss / test_dataset_size + print('\nTest_dataset_size: {} \nAvg Loss: {:.4f}'.format(test_dataset_size, epoch_loss)) + print('Testing time {}'.format(total_time_str)) + + +def main(args): + # choose decoder version + args.model = args.model + '_decoder' if args.seg_decoder is not None else args.model + # note decoder + args.model_idx = args.model_idx + args.model + '_' + args.seg_decoder if args.seg_decoder is not None \ + else args.model_idx + args.model + # note PromptTuning + args.model_idx = args.model_idx + '_Prompt_' + args.PromptTuning + '_tokennum_' + str(args.Prompt_Token_num) \ + if args.PromptTuning is not None else args.model_idx + + # Specify the Test settings + if args.fix_position_ratio is not None and args.fix_patch_size is not None and args.mask_ratio is None: + args.model_idx = 'Testing_' + args.model_idx + '_b_' + str(args.batch_size) \ + + '_hint_ratio_' + str(args.fix_position_ratio) + '_patch_size_' + str(args.fix_patch_size) + elif args.mask_ratio is not None and args.fix_position_ratio is None and args.fix_patch_size is None: + args.model_idx = 'Testing_' + args.model_idx + '_b_' + str(args.batch_size) \ + + '_mask_ratio_' + str(args.mask_ratio) + else: + print('invalid test setting: specify either fix_position_ratio + fix_patch_size (SAE) or mask_ratio (MAE), not both') + + print('\n\n' + args.model_idx + '\n\n') + + # set CUDA so that only the card of idx k is visible to this code + if args.gpu_idx != -1: # fixme: note that testing uses a single GPU only + print("Use", torch.cuda.device_count(), "GPUs of idx:", args.gpu_idx) + os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_idx) + else: + print("Use", torch.cuda.device_count(), "GPUs") + args.gpu = torch.cuda.device_count() + + print('job script dir: {}'.format(os.path.dirname(os.path.realpath(__file__)))) + print("{}".format(args).replace(', ', ',\n')) + + device = torch.device(args.device) # cuda + + # fix the seed for reproducibility + torch.manual_seed(args.seed) + np.random.seed(args.seed) + + cudnn.benchmark = True + + # simple augmentation + transform_test = transforms.Compose([ + # transforms.RandomResizedCrop(args.input_size, scale=(0.8, 1.0), interpolation=3, ratio=(1. / 1., 1.
/ 1.)), + # 3 is bicubic + transforms.Resize(args.input_size), + transforms.ToTensor(), + # transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ]) + + test_dataroot = os.path.join(args.data_path) # , 'test' + dataset_test = datasets.ImageFolder(test_dataroot, transform=transform_test) + test_dataset_size = len(dataset_test) + class_names = [d.name for d in os.scandir(test_dataroot) if d.is_dir()] + class_names.sort() + + print('dataset_test', dataset_test) # Test data + + # minibatch checking interval; if None, it is set so that around 80 figures are drawn + check_minibatch = args.check_minibatch if args.check_minibatch is not None else \ + test_dataset_size // (80 * args.batch_size) + check_minibatch = max(1, check_minibatch) + + # outputs + if args.log_dir is not None: + args.log_dir = os.path.join(args.log_dir, args.model_idx) + os.makedirs(args.log_dir, exist_ok=True) + log_writer = SummaryWriter(log_dir=args.log_dir) # Tensorboard + else: + log_writer = None + + # output_dir + if args.output_dir is not None: + args.output_dir = os.path.join(args.output_dir, args.model_idx) + os.makedirs(args.output_dir, exist_ok=True) + print('Testing output files will be at', args.output_dir) + + data_loader_test = torch.utils.data.DataLoader(dataset_test, + shuffle=args.shuffle_dataloader, + batch_size=args.batch_size, + num_workers=args.num_workers, + pin_memory=args.pin_mem, # False is suggested + drop_last=True) + + # define the model + if args.model[0:3] == 'mae': + model = models_mae.__dict__[args.model](img_size=args.input_size, norm_pix_loss=args.norm_pix_loss, + prompt_mode=args.PromptTuning, Prompt_Token_num=args.Prompt_Token_num, + dec_idx=args.seg_decoder) + + elif args.model[0:3] == 'sae': + model = SAE.__dict__[args.model](img_size=args.input_size, group_shuffle_size=args.group_shuffle_size, + norm_pix_loss=args.norm_pix_loss, prompt_mode=args.PromptTuning, + Prompt_Token_num=args.Prompt_Token_num, dec_idx=args.seg_decoder) + + else: + print('This MIM test script only supports SAE or MAE') + return -1 + + # take the model weights out of the checkpoint and load them + state_dict = torch.load(args.checkpoint_path)['model'] + model.load_state_dict(state_dict, strict=False) + model.to(device) + + # loss backward and optimizer operations are no longer needed in testing + # loss_scaler = NativeScaler() + + Puzzle_test(model, data_loader_test, test_dataset_size, + args.mask_ratio, args.fix_position_ratio, args.fix_patch_size, + check_minibatch, args.enable_visualize_check, args.combined_pred_illustration, args.check_samples, + device=device, output_dir=args.output_dir, writer=log_writer, args=args) + + # os.system("shutdown") # AUTO-DL server shutdown currently moved to .sh script for nohup task queue.
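+# Example invocations (a sketch for reference; the checkpoint and dataset paths are placeholders to adapt):
+#   SAE (puzzle hint) test:
+#     python PuzzleTesting.py --model sae_vit_base_patch16 --PromptTuning Deep --fix_position_ratio 0.25 \
+#         --fix_patch_size 32 --enable_visualize_check --checkpoint_path <checkpoint.pth> --data_path <imagefolder_root>
+#   MAE (random mask) test:
+#     python PuzzleTesting.py --model mae_vit_base_patch16 --mask_ratio 0.75 --enable_visualize_check
+# Exactly one setting should be given: fix_position_ratio + fix_patch_size (SAE) or mask_ratio (MAE);
+# anything else falls into the invalid-test-setting branch of main() above.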
+ + +def get_args_parser(): + parser = argparse.ArgumentParser('MIM visualization for PuzzleTuning', add_help=False) + + # Model Name or index + parser.add_argument('--model_idx', default='PuzzleTuning_', type=str, help='Model Name or index') + + # testing batch size + parser.add_argument('--batch_size', default=16, type=int, + help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus)') + + # Model parameters sae_vit_base_patch16 or mae_vit_base_patch16 + parser.add_argument('--model', default='sae_vit_base_patch16', type=str, metavar='MODEL', + help='Name of model to test') # originally mae_vit_large_patch16 + parser.add_argument('--seg_decoder', default=None, type=str, metavar='segmentation decoder', + help='Name of segmentation decoder') + + parser.add_argument('--input_size', default=224, type=int, # originally 224 + help='images input size') + parser.add_argument('--num_classes', default=3, type=int, # decoder seg class set to channel + help='the number of classes for segmentation') + + # MAE mask_ratio + parser.add_argument('--mask_ratio', default=None, type=float, + help='Masking ratio (percentage of removed patches).') + # Hint tokens + parser.add_argument('--fix_position_ratio', default=None, type=float, + help='basic fix_position_ratio (percentage of position token patches).') + parser.add_argument('--fix_patch_size', default=None, type=int, # originally 224 + help='puzzle patch size of the position hint patches') + parser.add_argument('--group_shuffle_size', default=-1, type=int, help='group_shuffle_size of group shuffling, ' + 'default -1 for the whole batch as a group') + # shuffle_dataloader + parser.add_argument('--shuffle_dataloader', action='store_true', help='shuffle Test dataset') + + # Tuning setting + # PromptTuning + parser.add_argument('--PromptTuning', default=None, type=str, + help='Deep/Shallow to use Prompt Tuning model instead of Finetuning model, by default None') + # Prompt_Token_num + parser.add_argument('--Prompt_Token_num', default=20, type=int, help='Prompt_Token_num') + # loss settings + parser.add_argument('--norm_pix_loss', action='store_true', + help='Use (per-patch) normalized pixels as targets for computing loss') + parser.set_defaults(norm_pix_loss=False) + + # PATH settings + # Dataset parameters /root/autodl-tmp/MARS_ALL /root/autodl-tmp/imagenet /root/autodl-tmp/datasets/All + parser.add_argument('--data_path', default='/root/autodl-tmp/datasets/PuzzleTuning_demoset', type=str, + help='dataset path') + parser.add_argument('--output_dir', default='/root/autodl-tmp/runs', + help='path where to save test log, empty for no saving') + parser.add_argument('--log_dir', default='/root/tf-logs', + help='path where to save the tensorboard test log') + + # Environment parameters + parser.add_argument('--gpu_idx', default=0, type=int, + help='use a single GPU with its index, -1 to use multiple GPU') + parser.add_argument('--device', default='cuda', + help='device to use for training / testing') + parser.add_argument('--seed', default=42, type=int) # originally 0, though the exact seed should not matter
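+    # note: main() seeds torch and numpy but leaves cudnn.benchmark = True, so runs are reproducible only up to cuDNN autotuning nondeterminism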
+ + # checkpoint_state_dict_path + parser.add_argument('--checkpoint_path', + default='/root/autodl-tmp/runs/PuzzleTuning_SAE_vit_base_patch16_Prompt_Deep_tokennum_20_tr_timm_CPIAm/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20_checkpoint-199.pth', + type=str, help='load state_dict for testing') + + # check settings + parser.add_argument('--combined_pred_illustration', action='store_true', help='check combined_pred_illustration pics') + parser.add_argument('--enable_visualize_check', action='store_true', help='check and save pics') + parser.add_argument('--check_minibatch', default=None, type=int, help='check batch_size') + parser.add_argument('--check_samples', default=1, type=int, help='check how many images in a checking batch') + + # dataloader setting + parser.add_argument('--num_workers', default=10, type=int) + parser.add_argument('--pin_mem', action='store_true', + help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') + parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem') + parser.set_defaults(pin_mem=True) + + return parser + + +if __name__ == '__main__': + args = get_args_parser() + args = args.parse_args() + + if args.output_dir: + Path(args.output_dir).mkdir(parents=True, exist_ok=True) + + main(args) \ No newline at end of file diff --git a/PuzzleTuning/PuzzleTuning Colab Demo.ipynb b/PuzzleTuning/PuzzleTuning Colab Demo.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..57dc6295af259577730844482765eef26cc98180 --- /dev/null +++ b/PuzzleTuning/PuzzleTuning Colab Demo.ipynb @@ -0,0 +1 @@ +{"cells":[{"cell_type":"markdown","metadata":{"id":"-1_HUut4YYm5"},"source":["# This is the official training Illustration of PuzzleTuning\n","* Use google colab pro+ (high RAM+GPU) to run 24 hours\n","* we use the Python3.7 Pytorch 1.9.0+cu111 torchvision 0.10.0+cu111\n","* we use the A100 GPU for the data-flow illustration with Colab\n","\n","The code and Training process along with all record are Open-Source:\n","* PuzzleTuning official github page: https://github.com/sagizty/PuzzleTuning\n","* The dataset CPIA is publicly aviliable at: https://github.com/zhanglab2021/CPIA_Dataset\n"]},{"cell_type":"markdown","metadata":{"id":"dzCoT1IxZ-1B"},"source":["## Check Colab GPU"]},{"cell_type":"code","execution_count":1,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":751,"status":"ok","timestamp":1700807513684,"user":{"displayName":"Tianyi Zhang","userId":"06202607434029765461"},"user_tz":-480},"id":"ZnbrNSoSXFm5","outputId":"0aa1b390-69f7-4e80-c03e-e1a12a35d2f2"},"outputs":[{"name":"stdout","output_type":"stream","text":["Fri Nov 24 06:31:52 2023 \n","+-----------------------------------------------------------------------------+\n","| NVIDIA-SMI 525.105.17 Driver Version: 525.105.17 CUDA Version: 12.0 |\n","|-------------------------------+----------------------+----------------------+\n","| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n","| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n","| | | MIG M. |\n","|===============================+======================+======================|\n","| 0 NVIDIA A100-SXM... 
Off | 00000000:00:04.0 Off | 0 |\n","| N/A 34C P0 55W / 400W | 0MiB / 40960MiB | 0% Default |\n","| | | Disabled |\n","+-------------------------------+----------------------+----------------------+\n"," \n","+-----------------------------------------------------------------------------+\n","| Processes: |\n","| GPU GI CI PID Type Process name GPU Memory |\n","| ID ID Usage |\n","|=============================================================================|\n","| No running processes found |\n","+-----------------------------------------------------------------------------+\n"]}],"source":["# check GPU\n","!nvidia-smi"]},{"cell_type":"code","execution_count":2,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":10,"status":"ok","timestamp":1700807513684,"user":{"displayName":"Tianyi Zhang","userId":"06202607434029765461"},"user_tz":-480},"id":"n9GPOn5gcykA","outputId":"45658563-b504-449a-e204-5f2a1f14b959"},"outputs":[{"name":"stdout","output_type":"stream","text":["Fri Nov 24 02:31:52 PM UTC 2023\n"]}],"source":["!date --date='+8 hour' # CST time zone"]},{"cell_type":"markdown","metadata":{"id":"fbnpeHYUgsJz"},"source":["## Mount Google Drive"]},{"cell_type":"markdown","metadata":{"id":"ixynw_V1ZqqI"},"source":["This will save output images to your google drive, you can remove this line and the last part if you don't want the output"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"3obRNrIaffjK"},"outputs":[],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"markdown","metadata":{"id":"BYevYeMFYmlx"},"source":["## Build file-system enviroment"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"ePtQFcQCEPlu"},"outputs":[],"source":["# create file-system enviroment\n","# mount the google drive first\n","# https://drive.google.com/drive/u/1/my-drive\n","\n","# clear colab path\n","!rm -rf /data\n","!rm -rf /home/Pathology_Experiment\n","\n","# create path\n","!mkdir /home/Pathology_Experiment\n","!mkdir /home/Pathology_Experiment/runs\n","!mkdir /home/Pathology_Experiment/code\n","!mkdir /home/Pathology_Experiment/saved_models\n","!mkdir /home/Pathology_Experiment/imaging_results\n","\n","!mkdir /data\n","!mkdir /data/Pathology_Experiment\n","!mkdir /data/Pathology_Experiment/dataset\n","\n","print('Folder Tree Creation completed!')\n","\n","# get latest code from Github pancreatic-cancer-diagnosis-tansformer page\n","!git clone https://github.com/sagizty/PuzzleTuning.git /home/Pathology_Experiment/code\n","print('code transfer from github completed!')\n","\n","# get the CLS dataset by its zip\n","!mv /home/Pathology_Experiment/code/Archive/* /data/Pathology_Experiment/dataset/\n","# unzip\n","!unzip -q /data/Pathology_Experiment/dataset/PuzzleTuning_demoset.zip -d /data/Pathology_Experiment/dataset/\n","!unzip -q /data/Pathology_Experiment/dataset/warwick_CLS.zip -d /data/Pathology_Experiment/dataset/\n","# alter the path\n","!rm -f /data/Pathology_Experiment/dataset/PuzzleTuning_demoset.zip\n","!rm -f /data/Pathology_Experiment/dataset/warwick_CLS.zip\n","print('data transfer completed!')"]},{"cell_type":"markdown","metadata":{"id":"xLxxHGq_wwwL"},"source":["## Arrange the working enviorment"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"background_save":true},"id":"K1Yb2b6TGF4r"},"outputs":[{"name":"stdout","output_type":"stream","text":["Requirement already satisfied: tqdm in /usr/local/lib/python3.10/dist-packages (4.66.1)\n","Collecting timm==0.5.4\n"," 
Downloading timm-0.5.4-py3-none-any.whl (431 kB)\n","\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m431.5/431.5 kB\u001b[0m \u001b[31m7.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hRequirement already satisfied: torch\u003e=1.4 in /usr/local/lib/python3.10/dist-packages (from timm==0.5.4) (2.1.0+cu118)\n","Requirement already satisfied: torchvision in /usr/local/lib/python3.10/dist-packages (from timm==0.5.4) (0.16.0+cu118)\n","Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from torch\u003e=1.4-\u003etimm==0.5.4) (3.13.1)\n","Requirement already satisfied: typing-extensions in /usr/local/lib/python3.10/dist-packages (from torch\u003e=1.4-\u003etimm==0.5.4) (4.5.0)\n","Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch\u003e=1.4-\u003etimm==0.5.4) (1.12)\n","Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch\u003e=1.4-\u003etimm==0.5.4) (3.2.1)\n","Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch\u003e=1.4-\u003etimm==0.5.4) (3.1.2)\n","Requirement already satisfied: fsspec in /usr/local/lib/python3.10/dist-packages (from torch\u003e=1.4-\u003etimm==0.5.4) (2023.6.0)\n","Requirement already satisfied: triton==2.1.0 in /usr/local/lib/python3.10/dist-packages (from torch\u003e=1.4-\u003etimm==0.5.4) (2.1.0)\n","Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from torchvision-\u003etimm==0.5.4) (1.23.5)\n","Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from torchvision-\u003etimm==0.5.4) (2.31.0)\n","Requirement already satisfied: pillow!=8.3.*,\u003e=5.3.0 in /usr/local/lib/python3.10/dist-packages (from torchvision-\u003etimm==0.5.4) (9.4.0)\n","Requirement already satisfied: MarkupSafe\u003e=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2-\u003etorch\u003e=1.4-\u003etimm==0.5.4) (2.1.3)\n","Requirement already satisfied: charset-normalizer\u003c4,\u003e=2 in /usr/local/lib/python3.10/dist-packages (from requests-\u003etorchvision-\u003etimm==0.5.4) (3.3.2)\n","Requirement already satisfied: idna\u003c4,\u003e=2.5 in /usr/local/lib/python3.10/dist-packages (from requests-\u003etorchvision-\u003etimm==0.5.4) (3.4)\n","Requirement already satisfied: urllib3\u003c3,\u003e=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests-\u003etorchvision-\u003etimm==0.5.4) (2.0.7)\n","Requirement already satisfied: certifi\u003e=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests-\u003etorchvision-\u003etimm==0.5.4) (2023.7.22)\n","Requirement already satisfied: mpmath\u003e=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy-\u003etorch\u003e=1.4-\u003etimm==0.5.4) (1.3.0)\n","Installing collected packages: timm\n","Successfully installed timm-0.5.4\n","Collecting einops\n"," Downloading einops-0.7.0-py3-none-any.whl (44 kB)\n","\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m44.6/44.6 kB\u001b[0m \u001b[31m1.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hInstalling collected packages: einops\n","Successfully installed einops-0.7.0\n","Collecting ml_collections\n"," Downloading ml_collections-0.1.1.tar.gz (77 kB)\n","\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m77.9/77.9 kB\u001b[0m \u001b[31m2.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25h Preparing metadata (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n","Requirement already satisfied: absl-py in /usr/local/lib/python3.10/dist-packages (from ml_collections) (1.4.0)\n","Requirement already satisfied: PyYAML in /usr/local/lib/python3.10/dist-packages (from ml_collections) (6.0.1)\n","Requirement already satisfied: six in /usr/local/lib/python3.10/dist-packages (from ml_collections) (1.16.0)\n","Requirement already satisfied: contextlib2 in /usr/local/lib/python3.10/dist-packages (from ml_collections) (21.6.0)\n","Building wheels for collected packages: ml_collections\n"," Building wheel for ml_collections (setup.py) ... \u001b[?25l\u001b[?25hdone\n"," Created wheel for ml_collections: filename=ml_collections-0.1.1-py3-none-any.whl size=94507 sha256=b01d6551e051eb94b398f73bab7144889d7ee2a44f392da831939d3029f7119a\n"," Stored in directory: /root/.cache/pip/wheels/7b/89/c9/a9b87790789e94aadcfc393c283e3ecd5ab916aed0a31be8fe\n","Successfully built ml_collections\n","Installing collected packages: ml_collections\n","Successfully installed ml_collections-0.1.1\n","Collecting ttach\n"," Downloading ttach-0.0.3-py3-none-any.whl (9.8 kB)\n","Installing collected packages: ttach\n","Successfully installed ttach-0.0.3\n","Collecting notifyemail\n"," Downloading notifyemail-1.1.1-py3-none-any.whl (19 kB)\n","Installing collected packages: notifyemail\n","Successfully installed notifyemail-1.1.1\n","Requirement already satisfied: psutil in /usr/local/lib/python3.10/dist-packages (5.9.5)\n","Requirement already satisfied: scipy in /usr/local/lib/python3.10/dist-packages (1.11.3)\n","Requirement already satisfied: numpy\u003c1.28.0,\u003e=1.21.6 in /usr/local/lib/python3.10/dist-packages (from scipy) (1.23.5)\n","Requirement already satisfied: torchsummary in /usr/local/lib/python3.10/dist-packages (1.5.1)\n","Collecting tensorboardX\n"," Downloading tensorboardX-2.6.2.2-py2.py3-none-any.whl (101 kB)\n","\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m101.7/101.7 kB\u001b[0m \u001b[31m2.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hRequirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from tensorboardX) (1.23.5)\n","Requirement already satisfied: packaging in /usr/local/lib/python3.10/dist-packages (from tensorboardX) (23.2)\n","Requirement already satisfied: protobuf\u003e=3.20 in /usr/local/lib/python3.10/dist-packages (from tensorboardX) (3.20.3)\n","Installing collected packages: tensorboardX\n","Successfully installed tensorboardX-2.6.2.2\n","Requirement already satisfied: opencv_contrib_python in /usr/local/lib/python3.10/dist-packages (4.8.0.76)\n","Requirement already satisfied: numpy\u003e=1.21.2 in /usr/local/lib/python3.10/dist-packages (from opencv_contrib_python) (1.23.5)\n","Requirement already satisfied: matplotlib in /usr/local/lib/python3.10/dist-packages (3.7.1)\n","Requirement already satisfied: contourpy\u003e=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (1.2.0)\n","Requirement already satisfied: cycler\u003e=0.10 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (0.12.1)\n","Requirement already satisfied: fonttools\u003e=4.22.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (4.44.3)\n","Requirement already satisfied: kiwisolver\u003e=1.0.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (1.4.5)\n","Requirement already satisfied: numpy\u003e=1.20 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (1.23.5)\n","Requirement already satisfied: packaging\u003e=20.0 in 
/usr/local/lib/python3.10/dist-packages (from matplotlib) (23.2)\n","Requirement already satisfied: pillow\u003e=6.2.0 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (9.4.0)\n","Requirement already satisfied: pyparsing\u003e=2.3.1 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (3.1.1)\n","Requirement already satisfied: python-dateutil\u003e=2.7 in /usr/local/lib/python3.10/dist-packages (from matplotlib) (2.8.2)\n","Requirement already satisfied: six\u003e=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil\u003e=2.7-\u003ematplotlib) (1.16.0)\n","Requirement already satisfied: ipykernel in /usr/local/lib/python3.10/dist-packages (5.5.6)\n","Requirement already satisfied: ipython-genutils in /usr/local/lib/python3.10/dist-packages (from ipykernel) (0.2.0)\n","Requirement already satisfied: ipython\u003e=5.0.0 in /usr/local/lib/python3.10/dist-packages (from ipykernel) (7.34.0)\n","Requirement already satisfied: traitlets\u003e=4.1.0 in /usr/local/lib/python3.10/dist-packages (from ipykernel) (5.7.1)\n","Requirement already satisfied: jupyter-client in /usr/local/lib/python3.10/dist-packages (from ipykernel) (6.1.12)\n","Requirement already satisfied: tornado\u003e=4.2 in /usr/local/lib/python3.10/dist-packages (from ipykernel) (6.3.2)\n","Requirement already satisfied: setuptools\u003e=18.5 in /usr/local/lib/python3.10/dist-packages (from ipython\u003e=5.0.0-\u003eipykernel) (67.7.2)\n","Collecting jedi\u003e=0.16 (from ipython\u003e=5.0.0-\u003eipykernel)\n"," Downloading jedi-0.19.1-py2.py3-none-any.whl (1.6 MB)\n","\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.6/1.6 MB\u001b[0m \u001b[31m24.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hRequirement already satisfied: decorator in /usr/local/lib/python3.10/dist-packages (from ipython\u003e=5.0.0-\u003eipykernel) (4.4.2)\n","Requirement already satisfied: pickleshare in /usr/local/lib/python3.10/dist-packages (from ipython\u003e=5.0.0-\u003eipykernel) (0.7.5)\n","Requirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,\u003c3.1.0,\u003e=2.0.0 in /usr/local/lib/python3.10/dist-packages (from ipython\u003e=5.0.0-\u003eipykernel) (3.0.41)\n","Requirement already satisfied: pygments in /usr/local/lib/python3.10/dist-packages (from ipython\u003e=5.0.0-\u003eipykernel) (2.16.1)\n","Requirement already satisfied: backcall in /usr/local/lib/python3.10/dist-packages (from ipython\u003e=5.0.0-\u003eipykernel) (0.2.0)\n","Requirement already satisfied: matplotlib-inline in /usr/local/lib/python3.10/dist-packages (from ipython\u003e=5.0.0-\u003eipykernel) (0.1.6)\n","Requirement already satisfied: pexpect\u003e4.3 in /usr/local/lib/python3.10/dist-packages (from ipython\u003e=5.0.0-\u003eipykernel) (4.8.0)\n","Requirement already satisfied: jupyter-core\u003e=4.6.0 in /usr/local/lib/python3.10/dist-packages (from jupyter-client-\u003eipykernel) (5.5.0)\n","Requirement already satisfied: pyzmq\u003e=13 in /usr/local/lib/python3.10/dist-packages (from jupyter-client-\u003eipykernel) (23.2.1)\n","Requirement already satisfied: python-dateutil\u003e=2.1 in /usr/local/lib/python3.10/dist-packages (from jupyter-client-\u003eipykernel) (2.8.2)\n","Requirement already satisfied: parso\u003c0.9.0,\u003e=0.8.3 in /usr/local/lib/python3.10/dist-packages (from jedi\u003e=0.16-\u003eipython\u003e=5.0.0-\u003eipykernel) (0.8.3)\n","Requirement already satisfied: platformdirs\u003e=2.5 in /usr/local/lib/python3.10/dist-packages (from 
jupyter-core\u003e=4.6.0-\u003ejupyter-client-\u003eipykernel) (4.0.0)\n","Requirement already satisfied: ptyprocess\u003e=0.5 in /usr/local/lib/python3.10/dist-packages (from pexpect\u003e4.3-\u003eipython\u003e=5.0.0-\u003eipykernel) (0.7.0)\n","Requirement already satisfied: wcwidth in /usr/local/lib/python3.10/dist-packages (from prompt-toolkit!=3.0.0,!=3.0.1,\u003c3.1.0,\u003e=2.0.0-\u003eipython\u003e=5.0.0-\u003eipykernel) (0.2.10)\n","Requirement already satisfied: six\u003e=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil\u003e=2.1-\u003ejupyter-client-\u003eipykernel) (1.16.0)\n","Installing collected packages: jedi\n","Successfully installed jedi-0.19.1\n"]}],"source":["# get packages\n","!pip install tqdm\n","!pip install timm==0.5.4\n","!pip install einops\n","!pip install ml_collections\n","!pip install ttach\n","!pip install notifyemail\n","!pip install psutil\n","!pip install scipy\n","!pip install torchsummary\n","!pip install tensorboardX\n","!pip install opencv_contrib_python\n","!pip install matplotlib\n","!pip install ipykernel"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"background_save":true},"id":"87Owjg_pN2yD"},"outputs":[{"name":"stdout","output_type":"stream","text":["Python 3.10.12\n"]}],"source":["!python --version"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"background_save":true},"id":"GpEVUWwqK79D"},"outputs":[{"name":"stdout","output_type":"stream","text":["Package Version\n","-------------------------------- ---------------------\n","absl-py 1.4.0\n","aiohttp 3.8.6\n","aiosignal 1.3.1\n","alabaster 0.7.13\n","albumentations 1.3.1\n","altair 4.2.2\n","anyio 3.7.1\n","appdirs 1.4.4\n","argon2-cffi 23.1.0\n","argon2-cffi-bindings 21.2.0\n","array-record 0.5.0\n","arviz 0.15.1\n","astropy 5.3.4\n","astunparse 1.6.3\n","async-timeout 4.0.3\n","atpublic 4.0\n","attrs 23.1.0\n","audioread 3.0.1\n","autograd 1.6.2\n","Babel 2.13.1\n","backcall 0.2.0\n","beautifulsoup4 4.11.2\n","bidict 0.22.1\n","bigframes 0.13.0\n","bleach 6.1.0\n","blinker 1.4\n","blis 0.7.11\n","blosc2 2.0.0\n","bokeh 3.3.1\n","bqplot 0.12.42\n","branca 0.7.0\n","build 1.0.3\n","CacheControl 0.13.1\n","cachetools 5.3.2\n","catalogue 2.0.10\n","certifi 2023.7.22\n","cffi 1.16.0\n","chardet 5.2.0\n","charset-normalizer 3.3.2\n","chex 0.1.7\n","click 8.1.7\n","click-plugins 1.1.1\n","cligj 0.7.2\n","cloudpickle 2.2.1\n","cmake 3.27.7\n","cmdstanpy 1.2.0\n","colorcet 3.0.1\n","colorlover 0.3.0\n","colour 0.1.5\n","community 1.0.0b1\n","confection 0.1.3\n","cons 0.4.6\n","contextlib2 21.6.0\n","contourpy 1.2.0\n","cryptography 41.0.5\n","cufflinks 0.17.3\n","cupy-cuda11x 11.0.0\n","cvxopt 1.3.2\n","cvxpy 1.3.2\n","cycler 0.12.1\n","cymem 2.0.8\n","Cython 3.0.5\n","dask 2023.8.1\n","datascience 0.17.6\n","db-dtypes 1.1.1\n","dbus-python 1.2.18\n","debugpy 1.6.6\n","decorator 4.4.2\n","defusedxml 0.7.1\n","diskcache 5.6.3\n","distributed 2023.8.1\n","distro 1.7.0\n","dlib 19.24.2\n","dm-tree 0.1.8\n","docutils 0.18.1\n","dopamine-rl 4.0.6\n","duckdb 0.9.2\n","earthengine-api 0.1.379\n","easydict 1.11\n","ecos 2.0.12\n","editdistance 0.6.2\n","eerepr 0.0.4\n","einops 0.7.0\n","en-core-web-sm 3.6.0\n","entrypoints 0.4\n","et-xmlfile 1.1.0\n","etils 1.5.2\n","etuples 0.3.9\n","exceptiongroup 1.1.3\n","fastai 2.7.13\n","fastcore 1.5.29\n","fastdownload 0.0.7\n","fastjsonschema 2.19.0\n","fastprogress 1.0.3\n","fastrlock 0.8.2\n","filelock 3.13.1\n","fiona 1.9.5\n","firebase-admin 5.3.0\n","Flask 2.2.5\n","flatbuffers 23.5.26\n","flax 
0.7.5\n","folium 0.14.0\n","fonttools 4.44.3\n","frozendict 2.3.8\n","frozenlist 1.4.0\n","fsspec 2023.6.0\n","future 0.18.3\n","gast 0.5.4\n","gcsfs 2023.6.0\n","GDAL 3.4.3\n","gdown 4.6.6\n","geemap 0.28.2\n","gensim 4.3.2\n","geocoder 1.38.1\n","geographiclib 2.0\n","geopandas 0.13.2\n","geopy 2.3.0\n","gin-config 0.5.0\n","glob2 0.7\n","google 2.0.3\n","google-ai-generativelanguage 0.3.3\n","google-api-core 2.11.1\n","google-api-python-client 2.84.0\n","google-auth 2.17.3\n","google-auth-httplib2 0.1.1\n","google-auth-oauthlib 1.0.0\n","google-cloud-bigquery 3.12.0\n","google-cloud-bigquery-connection 1.12.1\n","google-cloud-bigquery-storage 2.22.0\n","google-cloud-core 2.3.3\n","google-cloud-datastore 2.15.2\n","google-cloud-firestore 2.11.1\n","google-cloud-functions 1.13.3\n","google-cloud-iam 2.12.2\n","google-cloud-language 2.9.1\n","google-cloud-resource-manager 1.10.4\n","google-cloud-storage 2.8.0\n","google-cloud-translate 3.11.3\n","google-colab 1.0.0\n","google-crc32c 1.5.0\n","google-generativeai 0.2.2\n","google-pasta 0.2.0\n","google-resumable-media 2.6.0\n","googleapis-common-protos 1.61.0\n","googledrivedownloader 0.4\n","graphviz 0.20.1\n","greenlet 3.0.1\n","grpc-google-iam-v1 0.12.7\n","grpcio 1.59.2\n","grpcio-status 1.48.2\n","gspread 3.4.2\n","gspread-dataframe 3.3.1\n","gym 0.25.2\n","gym-notices 0.0.8\n","h5netcdf 1.3.0\n","h5py 3.9.0\n","holidays 0.36\n","holoviews 1.17.1\n","html5lib 1.1\n","httpimport 1.3.1\n","httplib2 0.22.0\n","huggingface-hub 0.19.4\n","humanize 4.7.0\n","hyperopt 0.2.7\n","ibis-framework 6.2.0\n","idna 3.4\n","imageio 2.31.6\n","imageio-ffmpeg 0.4.9\n","imagesize 1.4.1\n","imbalanced-learn 0.10.1\n","imgaug 0.4.0\n","importlib-metadata 6.8.0\n","importlib-resources 6.1.1\n","imutils 0.5.4\n","inflect 7.0.0\n","iniconfig 2.0.0\n","install 1.3.5\n","intel-openmp 2023.2.0\n","ipyevents 2.0.2\n","ipyfilechooser 0.6.0\n","ipykernel 5.5.6\n","ipyleaflet 0.17.4\n","ipython 7.34.0\n","ipython-genutils 0.2.0\n","ipython-sql 0.5.0\n","ipytree 0.2.2\n","ipywidgets 7.7.1\n","itsdangerous 2.1.2\n","jax 0.4.20\n","jaxlib 0.4.20+cuda11.cudnn86\n","jedi 0.19.1\n","jeepney 0.7.1\n","jieba 0.42.1\n","Jinja2 3.1.2\n","joblib 1.3.2\n","jsonpickle 3.0.2\n","jsonschema 4.19.2\n","jsonschema-specifications 2023.11.1\n","jupyter-client 6.1.12\n","jupyter-console 6.1.0\n","jupyter_core 5.5.0\n","jupyter-server 1.24.0\n","jupyterlab-pygments 0.2.2\n","jupyterlab-widgets 3.0.9\n","kaggle 1.5.16\n","keras 2.14.0\n","keyring 23.5.0\n","kiwisolver 1.4.5\n","langcodes 3.3.0\n","launchpadlib 1.10.16\n","lazr.restfulclient 0.14.4\n","lazr.uri 1.0.6\n","lazy_loader 0.3\n","libclang 16.0.6\n","librosa 0.10.1\n","lida 0.0.10\n","lightgbm 4.1.0\n","linkify-it-py 2.0.2\n","llmx 0.0.15a0\n","llvmlite 0.41.1\n","locket 1.0.0\n","logical-unification 0.4.6\n","lxml 4.9.3\n","malloy 2023.1064\n","Markdown 3.5.1\n","markdown-it-py 3.0.0\n","MarkupSafe 2.1.3\n","matplotlib 3.7.1\n","matplotlib-inline 0.1.6\n","matplotlib-venn 0.11.9\n","mdit-py-plugins 0.4.0\n","mdurl 0.1.2\n","miniKanren 1.0.3\n","missingno 0.5.2\n","mistune 0.8.4\n","mizani 0.9.3\n","mkl 2023.2.0\n","ml-collections 0.1.1\n","ml-dtypes 0.2.0\n","mlxtend 0.22.0\n","more-itertools 10.1.0\n","moviepy 1.0.3\n","mpmath 1.3.0\n","msgpack 1.0.7\n","multidict 6.0.4\n","multipledispatch 1.0.0\n","multitasking 0.0.11\n","murmurhash 1.0.10\n","music21 9.1.0\n","natsort 8.4.0\n","nbclassic 1.0.0\n","nbclient 0.9.0\n","nbconvert 6.5.4\n","nbformat 5.9.2\n","nest-asyncio 1.5.8\n","networkx 3.2.1\n","nibabel 4.0.2\n","nltk 
3.8.1\n","notebook 6.5.5\n","notebook_shim 0.2.3\n","notifyemail 1.1.1\n","numba 0.58.1\n","numexpr 2.8.7\n","numpy 1.23.5\n","oauth2client 4.1.3\n","oauthlib 3.2.2\n","opencv-contrib-python 4.8.0.76\n","opencv-python 4.8.0.76\n","opencv-python-headless 4.8.1.78\n","openpyxl 3.1.2\n","opt-einsum 3.3.0\n","optax 0.1.7\n","orbax-checkpoint 0.4.2\n","osqp 0.6.2.post8\n","packaging 23.2\n","pandas 1.5.3\n","pandas-datareader 0.10.0\n","pandas-gbq 0.17.9\n","pandas-stubs 1.5.3.230304\n","pandocfilters 1.5.0\n","panel 1.3.1\n","param 2.0.1\n","parso 0.8.3\n","parsy 2.1\n","partd 1.4.1\n","pathlib 1.0.1\n","pathy 0.10.3\n","patsy 0.5.3\n","peewee 3.17.0\n","pexpect 4.8.0\n","pickleshare 0.7.5\n","Pillow 9.4.0\n","pip 23.1.2\n","pip-tools 6.13.0\n","platformdirs 4.0.0\n","plotly 5.15.0\n","plotnine 0.12.4\n","pluggy 1.3.0\n","polars 0.17.3\n","pooch 1.8.0\n","portpicker 1.5.2\n","prefetch-generator 1.0.3\n","preshed 3.0.9\n","prettytable 3.9.0\n","proglog 0.1.10\n","progressbar2 4.2.0\n","prometheus-client 0.18.0\n","promise 2.3\n","prompt-toolkit 3.0.41\n","prophet 1.1.5\n","proto-plus 1.22.3\n","protobuf 3.20.3\n","psutil 5.9.5\n","psycopg2 2.9.9\n","ptyprocess 0.7.0\n","py-cpuinfo 9.0.0\n","py4j 0.10.9.7\n","pyarrow 9.0.0\n","pyasn1 0.5.0\n","pyasn1-modules 0.3.0\n","pycocotools 2.0.7\n","pycparser 2.21\n","pyct 0.5.0\n","pydantic 1.10.13\n","pydata-google-auth 1.8.2\n","pydot 1.4.2\n","pydot-ng 2.0.0\n","pydotplus 2.0.2\n","PyDrive 1.3.1\n","PyDrive2 1.6.3\n","pyerfa 2.0.1.1\n","pygame 2.5.2\n","Pygments 2.16.1\n","PyGObject 3.42.1\n","PyJWT 2.3.0\n","pymc 5.7.2\n","pymystem3 0.2.0\n","PyOpenGL 3.1.7\n","pyOpenSSL 23.3.0\n","pyparsing 3.1.1\n","pyperclip 1.8.2\n","pyproj 3.6.1\n","pyproject_hooks 1.0.0\n","pyshp 2.3.1\n","PySocks 1.7.1\n","pytensor 2.14.2\n","pytest 7.4.3\n","python-apt 0.0.0\n","python-box 7.1.1\n","python-dateutil 2.8.2\n","python-louvain 0.16\n","python-slugify 8.0.1\n","python-utils 3.8.1\n","pytz 2023.3.post1\n","pyviz_comms 3.0.0\n","PyWavelets 1.4.1\n","PyYAML 6.0.1\n","pyzmq 23.2.1\n","qdldl 0.1.7.post0\n","qudida 0.0.4\n","ratelim 0.1.6\n","referencing 0.31.0\n","regex 2023.6.3\n","requests 2.31.0\n","requests-oauthlib 1.3.1\n","requirements-parser 0.5.0\n","rich 13.7.0\n","rpds-py 0.13.0\n","rpy2 3.4.2\n","rsa 4.9\n","safetensors 0.4.0\n","scikit-image 0.19.3\n","scikit-learn 1.2.2\n","scipy 1.11.3\n","scooby 0.9.2\n","scs 3.2.4\n","seaborn 0.12.2\n","SecretStorage 3.3.1\n","Send2Trash 1.8.2\n","setuptools 67.7.2\n","shapely 2.0.2\n","six 1.16.0\n","sklearn-pandas 2.2.0\n","smart-open 6.4.0\n","sniffio 1.3.0\n","snowballstemmer 2.2.0\n","sortedcontainers 2.4.0\n","soundfile 0.12.1\n","soupsieve 2.5\n","soxr 0.3.7\n","spacy 3.6.1\n","spacy-legacy 3.0.12\n","spacy-loggers 1.0.5\n","Sphinx 5.0.2\n","sphinxcontrib-applehelp 1.0.7\n","sphinxcontrib-devhelp 1.0.5\n","sphinxcontrib-htmlhelp 2.0.4\n","sphinxcontrib-jsmath 1.0.1\n","sphinxcontrib-qthelp 1.0.6\n","sphinxcontrib-serializinghtml 1.1.9\n","SQLAlchemy 2.0.23\n","sqlglot 17.16.2\n","sqlparse 0.4.4\n","srsly 2.4.8\n","stanio 0.3.0\n","statsmodels 0.14.0\n","sympy 1.12\n","tables 3.8.0\n","tabulate 0.9.0\n","tbb 2021.11.0\n","tblib 3.0.0\n","tenacity 8.2.3\n","tensorboard 2.14.1\n","tensorboard-data-server 0.7.2\n","tensorboardX 2.6.2.2\n","tensorflow 2.14.0\n","tensorflow-datasets 4.9.3\n","tensorflow-estimator 2.14.0\n","tensorflow-gcs-config 2.14.0\n","tensorflow-hub 0.15.0\n","tensorflow-io-gcs-filesystem 0.34.0\n","tensorflow-metadata 1.14.0\n","tensorflow-probability 0.22.0\n","tensorstore 0.1.45\n","termcolor 
2.3.0\n","terminado 0.18.0\n","text-unidecode 1.3\n","textblob 0.17.1\n","tf-slim 1.1.0\n","thinc 8.1.12\n","threadpoolctl 3.2.0\n","tifffile 2023.9.26\n","timm 0.5.4\n","tinycss2 1.2.1\n","tokenizers 0.15.0\n","toml 0.10.2\n","tomli 2.0.1\n","toolz 0.12.0\n","torch 2.1.0+cu118\n","torchaudio 2.1.0+cu118\n","torchdata 0.7.0\n","torchsummary 1.5.1\n","torchtext 0.16.0\n","torchvision 0.16.0+cu118\n","tornado 6.3.2\n","tqdm 4.66.1\n","traitlets 5.7.1\n","traittypes 0.2.1\n","transformers 4.35.2\n","triton 2.1.0\n","ttach 0.0.3\n","tweepy 4.14.0\n","typer 0.9.0\n","types-pytz 2023.3.1.1\n","types-setuptools 68.2.0.1\n","typing_extensions 4.5.0\n","tzlocal 5.2\n","uc-micro-py 1.0.2\n","uritemplate 4.1.1\n","urllib3 2.0.7\n","vega-datasets 0.9.0\n","wadllib 1.3.6\n","wasabi 1.1.2\n","wcwidth 0.2.10\n","webcolors 1.13\n","webencodings 0.5.1\n","websocket-client 1.6.4\n","Werkzeug 3.0.1\n","wheel 0.41.3\n","widgetsnbextension 3.6.6\n","wordcloud 1.9.2\n","wrapt 1.14.1\n","xarray 2023.7.0\n","xarray-einstats 0.6.0\n","xgboost 2.0.2\n","xlrd 2.0.1\n","xxhash 3.4.1\n","xyzservices 2023.10.1\n","yarl 1.9.2\n","yellowbrick 1.5\n","yfinance 0.2.31\n","zict 3.0.0\n","zipp 3.17.0\n"]}],"source":["!pip list\n","!pip freeze\u003erequirements.txt\n","!cp requirements.txt ../runs"]},{"cell_type":"markdown","metadata":{"id":"h31KAx1ZZEl9"},"source":["# Pre-Training\n","* set up path by command line\n","* use argparse to set down hyper-parameter\n","\n","10000epochs will be trined with 400 images, for data-flow illustration.\n","We suggest you to use 4 * A100 SMX4 GPUs to train PuzzleTuning with CPIA dataset.\n","\n"]},{"cell_type":"markdown","metadata":{"id":"hveEqtxuZePT"},"source":["Our official training script is given here:"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"background_save":true},"id":"F-wStD9SYeXA"},"outputs":[],"source":["# nohup python PuzzleTuning.py --batch_size 64 --group_shuffle_size 16 --blr 1.5e-4 --epochs 200 --accum_iter 2 --print_freq 2000 --check_point_gap 50 --input_size 224 --warmup_epochs 20 --pin_mem --num_workers 32 --strategy loop --PromptTuning Deep --basic_state_dict ../saved_models/ViT_b16_224_Imagenet.pth --data_path ../datasets/All \u0026"]},{"cell_type":"markdown","metadata":{"id":"Y3pitng_YhqM"},"source":["All following lines are for data-flow illustation with colab"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"background_save":true},"id":"mjq7h2j1Wzdh"},"outputs":[{"name":"stdout","output_type":"stream","text":["/home/Pathology_Experiment/code\n"]}],"source":["# change working dir\n","import os\n","os.chdir(\"/home/Pathology_Experiment/code\")\n","!pwd"]},{"cell_type":"markdown","metadata":{"id":"7xGPpwiXa5vC"},"source":["Training"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"background_save":true},"id":"T6jwGi8oa69a"},"outputs":[{"name":"stdout","output_type":"stream","text":["\u001b[1;30;43mStreaming output truncated to the last 5000 lines.\u001b[0m\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39167] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0216 (0.0216) time: 0.2915 data: 0.2048 max mem: 5716\n","Epoch: [39167] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0247 (0.0241) time: 0.0953 data: 0.0172 max mem: 5716\n","Epoch: [39167] Total time: 0:00:01 (0.0994 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0247 (0.0241) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: 
/home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20
Epoch: [39168]  [ 0/12]  eta: 0:00:03  lr: 0.000000  loss: 0.0211 (0.0211)  time: 0.2886  data: 0.1982  max mem: 5716
Epoch: [39168]  [11/12]  eta: 0:00:00  lr: 0.000000  loss: 0.0240 (0.0244)  time: 0.0966  data: 0.0167  max mem: 5716
Epoch: [39168] Total time: 0:00:01 (0.1007 s / it)
Averaged stats: lr: 0.000000  loss: 0.0240 (0.0244)  fix_position_ratio: 0.25  puzzle_patch_size: 112
Figure(640x480)
[... epochs 39169-39269 repeat this five-line block verbatim except for the loss values: lr stays 0.000000, the averaged loss stays between about 0.020 and 0.025, fix_position_ratio stays 0.25, puzzle_patch_size cycles through 16, 32, and 112 for three epochs each, each 12-iteration epoch completes in about 0.10 s / it, and max mem stays 5716 ...]
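The excerpt above is machine-generated MetricLogger-style stdout; across hundreds of epochs the only values that change are the per-epoch loss and the cycling puzzle_patch_size. For readers who want to condense such a log themselves, the following is a minimal sketch assuming only the line format shown above; the regex, the summarise function, and the nohup.out path are illustrative assumptions, not part of the PuzzleTuning code.

import re
from statistics import mean

# Matches the "Averaged stats" line printed once per epoch in the excerpt
# above. Pattern and names are illustrative, not repository code.
STATS_RE = re.compile(
    r"Averaged stats: lr: [\d.]+\s+loss: [\d.]+ \((?P<loss>[\d.]+)\)\s+"
    r"fix_position_ratio: [\d.]+\s+puzzle_patch_size: (?P<patch>\d+)"
)

def summarise(log_path):
    """Return {puzzle_patch_size: mean averaged loss} over all logged epochs."""
    per_patch = {}
    with open(log_path) as fh:
        for line in fh:
            m = STATS_RE.search(line)
            if m:
                per_patch.setdefault(int(m["patch"]), []).append(float(m["loss"]))
    return {p: mean(v) for p, v in sorted(per_patch.items())}

if __name__ == "__main__":
    for patch, loss in summarise("nohup.out").items():  # hypothetical log path
        print(f"puzzle_patch_size={patch:>3}: mean averaged loss {loss:.4f}")

Run against the captured stdout, a summary like this shows at a glance whether the three puzzle scales sit at comparable loss levels, which is otherwise hard to judge from the raw stream.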
log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20
[... epochs 39270-39309 continue the identical pattern: lr 0.000000, averaged loss between about 0.020 and 0.025, fix_position_ratio 0.25, puzzle_patch_size cycling 16 / 32 / 112 for three epochs each, max mem 5716 ...]
Epoch: [39310]  [ 0/12]  eta: 0:00:03  lr: 0.000000  loss: 0.0260 (0.0260)  time: 0.2996  data: 0.2104  max mem: 5716
Epoch: [39310]  [11/12]  eta: 0:00:00  lr: 0.000000  loss: 0.0228 (0.0232)  time: 0.0958  data: 0.0177  max mem: 5716
Epoch: [39310] Total time: 0:00:01 (0.0999 s / it)
Averaged stats: lr: 0.000000  loss:
0.0228 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39311] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0266 (0.0266) time: 0.2889 data: 0.1977 max mem: 5716\n","Epoch: [39311] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0239 (0.0244) time: 0.0952 data: 0.0166 max mem: 5716\n","Epoch: [39311] Total time: 0:00:01 (0.0993 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0239 (0.0244) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39312] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0235 (0.0235) time: 0.3035 data: 0.2143 max mem: 5716\n","Epoch: [39312] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0246 (0.0246) time: 0.0960 data: 0.0180 max mem: 5716\n","Epoch: [39312] Total time: 0:00:01 (0.1002 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0246 (0.0246) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39313] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0202 (0.0202) time: 0.2919 data: 0.2040 max mem: 5716\n","Epoch: [39313] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0248 (0.0245) time: 0.0958 data: 0.0172 max mem: 5716\n","Epoch: [39313] Total time: 0:00:01 (0.1000 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0248 (0.0245) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39314] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0225 (0.0225) time: 0.2956 data: 0.2077 max mem: 5716\n","Epoch: [39314] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0208 (0.0208) time: 0.0955 data: 0.0174 max mem: 5716\n","Epoch: [39314] Total time: 0:00:01 (0.0997 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0208 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39315] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0235 (0.0235) time: 0.2819 data: 0.1896 max mem: 5716\n","Epoch: [39315] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0209 (0.0208) time: 0.0944 data: 0.0160 max mem: 5716\n","Epoch: [39315] Total time: 0:00:01 (0.0986 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0209 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39316] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0224 (0.0224) time: 0.2850 data: 0.1975 max mem: 5716\n","Epoch: [39316] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0207 (0.0208) time: 0.0962 data: 0.0166 max mem: 5716\n","Epoch: [39316] Total time: 0:00:01 (0.1003 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0207 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39317] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0196 (0.0196) time: 0.2869 data: 0.1993 max mem: 5716\n","Epoch: [39317] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0224 (0.0230) time: 0.0954 data: 0.0168 max mem: 
5716\n","Epoch: [39317] Total time: 0:00:01 (0.1013 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0224 (0.0230) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39318] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0237 (0.0237) time: 0.2861 data: 0.1969 max mem: 5716\n","Epoch: [39318] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0237 (0.0233) time: 0.0946 data: 0.0165 max mem: 5716\n","Epoch: [39318] Total time: 0:00:01 (0.0987 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0237 (0.0233) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39319] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0260 (0.0260) time: 0.3064 data: 0.2054 max mem: 5716\n","Epoch: [39319] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0224 (0.0228) time: 0.0964 data: 0.0173 max mem: 5716\n","Epoch: [39319] Total time: 0:00:01 (0.1005 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0224 (0.0228) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39320] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0260 (0.0260) time: 0.2993 data: 0.2096 max mem: 5716\n","Epoch: [39320] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0239 (0.0243) time: 0.0958 data: 0.0176 max mem: 5716\n","Epoch: [39320] Total time: 0:00:01 (0.1012 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0239 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39321] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0219 (0.0219) time: 0.2893 data: 0.1982 max mem: 5716\n","Epoch: [39321] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0237 (0.0239) time: 0.0962 data: 0.0167 max mem: 5716\n","Epoch: [39321] Total time: 0:00:01 (0.1004 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0237 (0.0239) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39322] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0294 (0.0294) time: 0.2967 data: 0.2074 max mem: 5716\n","Epoch: [39322] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0244 (0.0243) time: 0.0969 data: 0.0174 max mem: 5716\n","Epoch: [39322] Total time: 0:00:01 (0.1011 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0244 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39323] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0199 (0.0199) time: 0.2986 data: 0.2078 max mem: 5716\n","Epoch: [39323] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0205 (0.0208) time: 0.0960 data: 0.0175 max mem: 5716\n","Epoch: [39323] Total time: 0:00:01 (0.1002 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0205 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39324] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0207 (0.0207) time: 0.2936 data: 0.2038 max mem: 5716\n","Epoch: 
[39324] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0209 (0.0208) time: 0.0953 data: 0.0172 max mem: 5716\n","Epoch: [39324] Total time: 0:00:01 (0.1005 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0209 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39325] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0221 (0.0221) time: 0.2884 data: 0.1977 max mem: 5716\n","Epoch: [39325] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0212 (0.0206) time: 0.0947 data: 0.0166 max mem: 5716\n","Epoch: [39325] Total time: 0:00:01 (0.1002 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0212 (0.0206) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39326] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0266 (0.0266) time: 0.2933 data: 0.2058 max mem: 5716\n","Epoch: [39326] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0228 (0.0231) time: 0.0956 data: 0.0173 max mem: 5716\n","Epoch: [39326] Total time: 0:00:01 (0.1014 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0228 (0.0231) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39327] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0194 (0.0194) time: 0.2892 data: 0.2018 max mem: 5716\n","Epoch: [39327] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0231 (0.0232) time: 0.0945 data: 0.0169 max mem: 5716\n","Epoch: [39327] Total time: 0:00:01 (0.0986 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0231 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39328] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0182 (0.0182) time: 0.2943 data: 0.2059 max mem: 5716\n","Epoch: [39328] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0222 (0.0230) time: 0.0956 data: 0.0173 max mem: 5716\n","Epoch: [39328] Total time: 0:00:01 (0.0998 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0222 (0.0230) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39329] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0258 (0.0258) time: 0.2929 data: 0.2042 max mem: 5716\n","Epoch: [39329] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0241 (0.0242) time: 0.0984 data: 0.0172 max mem: 5716\n","Epoch: [39329] Total time: 0:00:01 (0.1026 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0241 (0.0242) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39330] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0258 (0.0258) time: 0.2955 data: 0.1973 max mem: 5716\n","Epoch: [39330] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0240 (0.0242) time: 0.0975 data: 0.0166 max mem: 5716\n","Epoch: [39330] Total time: 0:00:01 (0.1030 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0240 (0.0242) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39331] [ 0/12] eta: 
0:00:03 lr: 0.000000 loss: 0.0246 (0.0246) time: 0.2923 data: 0.2054 max mem: 5716\n","Epoch: [39331] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0245 (0.0244) time: 0.0950 data: 0.0173 max mem: 5716\n","Epoch: [39331] Total time: 0:00:01 (0.0992 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0245 (0.0244) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39332] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0187 (0.0187) time: 0.2885 data: 0.1965 max mem: 5716\n","Epoch: [39332] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0195 (0.0207) time: 0.0961 data: 0.0166 max mem: 5716\n","Epoch: [39332] Total time: 0:00:01 (0.1003 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0195 (0.0207) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39333] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0219 (0.0219) time: 0.2901 data: 0.2013 max mem: 5716\n","Epoch: [39333] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0202 (0.0207) time: 0.0957 data: 0.0169 max mem: 5716\n","Epoch: [39333] Total time: 0:00:01 (0.0997 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0202 (0.0207) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39334] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0220 (0.0220) time: 0.2923 data: 0.2029 max mem: 5716\n","Epoch: [39334] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0202 (0.0208) time: 0.0957 data: 0.0171 max mem: 5716\n","Epoch: [39334] Total time: 0:00:01 (0.0998 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0202 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39335] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0210 (0.0210) time: 0.3042 data: 0.2031 max mem: 5716\n","Epoch: [39335] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0227 (0.0234) time: 0.0972 data: 0.0171 max mem: 5716\n","Epoch: [39335] Total time: 0:00:01 (0.1014 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0227 (0.0234) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39336] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0223 (0.0223) time: 0.2913 data: 0.1921 max mem: 5716\n","Epoch: [39336] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0223 (0.0229) time: 0.0954 data: 0.0162 max mem: 5716\n","Epoch: [39336] Total time: 0:00:01 (0.0995 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0223 (0.0229) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39337] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0255 (0.0255) time: 0.2931 data: 0.2002 max mem: 5716\n","Epoch: [39337] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0227 (0.0234) time: 0.0966 data: 0.0168 max mem: 5716\n","Epoch: [39337] Total time: 0:00:01 (0.1022 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0227 (0.0234) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: 
/home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39338] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0294 (0.0294) time: 0.2788 data: 0.1892 max mem: 5716\n","Epoch: [39338] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0235 (0.0241) time: 0.0955 data: 0.0159 max mem: 5716\n","Epoch: [39338] Total time: 0:00:01 (0.0996 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0235 (0.0241) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39339] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0302 (0.0302) time: 0.2845 data: 0.1928 max mem: 5716\n","Epoch: [39339] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0229 (0.0242) time: 0.0957 data: 0.0162 max mem: 5716\n","Epoch: [39339] Total time: 0:00:01 (0.0998 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0229 (0.0242) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39340] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0200 (0.0200) time: 0.2937 data: 0.2059 max mem: 5716\n","Epoch: [39340] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0237 (0.0243) time: 0.0952 data: 0.0173 max mem: 5716\n","Epoch: [39340] Total time: 0:00:01 (0.0993 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0237 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39341] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0247 (0.0247) time: 0.2833 data: 0.1919 max mem: 5716\n","Epoch: [39341] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0204 (0.0211) time: 0.0949 data: 0.0161 max mem: 5716\n","Epoch: [39341] Total time: 0:00:01 (0.1010 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0204 (0.0211) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39342] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0218 (0.0218) time: 0.2874 data: 0.1968 max mem: 5716\n","Epoch: [39342] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0196 (0.0206) time: 0.0950 data: 0.0165 max mem: 5716\n","Epoch: [39342] Total time: 0:00:01 (0.0992 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0196 (0.0206) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39343] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0218 (0.0218) time: 0.2947 data: 0.2089 max mem: 5716\n","Epoch: [39343] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0205 (0.0205) time: 0.0954 data: 0.0176 max mem: 5716\n","Epoch: [39343] Total time: 0:00:01 (0.0995 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0205 (0.0205) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39344] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0208 (0.0208) time: 0.2924 data: 0.2050 max mem: 5716\n","Epoch: [39344] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0241 (0.0233) time: 0.0952 data: 0.0173 max mem: 5716\n","Epoch: [39344] Total time: 0:00:01 (0.0994 s / it)\n","Averaged stats: lr: 0.000000 loss: 
0.0241 (0.0233) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39345] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0194 (0.0194) time: 0.2913 data: 0.1924 max mem: 5716\n","Epoch: [39345] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0222 (0.0230) time: 0.0958 data: 0.0162 max mem: 5716\n","Epoch: [39345] Total time: 0:00:01 (0.1013 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0222 (0.0230) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39346] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0227 (0.0227) time: 0.2926 data: 0.2009 max mem: 5716\n","Epoch: [39346] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0231 (0.0231) time: 0.0973 data: 0.0169 max mem: 5716\n","Epoch: [39346] Total time: 0:00:01 (0.1022 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0231 (0.0231) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39347] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0232 (0.0232) time: 0.2924 data: 0.2001 max mem: 5716\n","Epoch: [39347] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0233 (0.0242) time: 0.0960 data: 0.0168 max mem: 5716\n","Epoch: [39347] Total time: 0:00:01 (0.1011 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0233 (0.0242) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39348] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0215 (0.0215) time: 0.2889 data: 0.1950 max mem: 5716\n","Epoch: [39348] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0235 (0.0244) time: 0.0951 data: 0.0164 max mem: 5716\n","Epoch: [39348] Total time: 0:00:01 (0.0992 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0235 (0.0244) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39349] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0232 (0.0232) time: 0.2858 data: 0.1966 max mem: 5716\n","Epoch: [39349] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0237 (0.0243) time: 0.0949 data: 0.0165 max mem: 5716\n","Epoch: [39349] Total time: 0:00:01 (0.0990 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0237 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39350] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0203 (0.0203) time: 0.3145 data: 0.2150 max mem: 5716\n","Epoch: [39350] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0203 (0.0210) time: 0.0975 data: 0.0181 max mem: 5716\n","Epoch: [39350] Total time: 0:00:01 (0.1016 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0203 (0.0210) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39351] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0201 (0.0201) time: 0.2968 data: 0.2113 max mem: 5716\n","Epoch: [39351] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0204 (0.0206) time: 0.0952 data: 0.0178 max mem: 
5716\n","Epoch: [39351] Total time: 0:00:01 (0.0994 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0204 (0.0206) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39352] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0198 (0.0198) time: 0.2917 data: 0.2048 max mem: 5716\n","Epoch: [39352] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0198 (0.0206) time: 0.0952 data: 0.0172 max mem: 5716\n","Epoch: [39352] Total time: 0:00:01 (0.0994 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0198 (0.0206) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39353] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0214 (0.0214) time: 0.2884 data: 0.1955 max mem: 5716\n","Epoch: [39353] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0227 (0.0233) time: 0.0981 data: 0.0164 max mem: 5716\n","Epoch: [39353] Total time: 0:00:01 (0.1023 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0227 (0.0233) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39354] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0274 (0.0274) time: 0.2954 data: 0.2074 max mem: 5716\n","Epoch: [39354] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0219 (0.0231) time: 0.0979 data: 0.0175 max mem: 5716\n","Epoch: [39354] Total time: 0:00:01 (0.1020 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0219 (0.0231) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39355] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0268 (0.0268) time: 0.2996 data: 0.2111 max mem: 5716\n","Epoch: [39355] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0222 (0.0229) time: 0.0969 data: 0.0177 max mem: 5716\n","Epoch: [39355] Total time: 0:00:01 (0.1010 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0222 (0.0229) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39356] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0208 (0.0208) time: 0.2927 data: 0.1996 max mem: 5716\n","Epoch: [39356] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0243 (0.0242) time: 0.0962 data: 0.0168 max mem: 5716\n","Epoch: [39356] Total time: 0:00:01 (0.1004 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0243 (0.0242) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39357] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0201 (0.0201) time: 0.2865 data: 0.1964 max mem: 5716\n","Epoch: [39357] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0246 (0.0247) time: 0.0950 data: 0.0165 max mem: 5716\n","Epoch: [39357] Total time: 0:00:01 (0.0991 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0246 (0.0247) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39358] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0291 (0.0291) time: 0.2896 data: 0.1981 max mem: 5716\n","Epoch: 
[39358] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0236 (0.0243) time: 0.0979 data: 0.0167 max mem: 5716\n","Epoch: [39358] Total time: 0:00:01 (0.1020 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0236 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39359] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0224 (0.0224) time: 0.2929 data: 0.2044 max mem: 5716\n","Epoch: [39359] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0222 (0.0209) time: 0.0952 data: 0.0172 max mem: 5716\n","Epoch: [39359] Total time: 0:00:01 (0.0993 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0222 (0.0209) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39360] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0261 (0.0261) time: 0.2943 data: 0.2068 max mem: 5716\n","Epoch: [39360] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0200 (0.0208) time: 0.0955 data: 0.0174 max mem: 5716\n","Epoch: [39360] Total time: 0:00:01 (0.0996 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0200 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39361] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0161 (0.0161) time: 0.2958 data: 0.2077 max mem: 5716\n","Epoch: [39361] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0200 (0.0207) time: 0.0953 data: 0.0175 max mem: 5716\n","Epoch: [39361] Total time: 0:00:01 (0.0995 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0200 (0.0207) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39362] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0249 (0.0249) time: 0.2944 data: 0.2051 max mem: 5716\n","Epoch: [39362] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0235 (0.0230) time: 0.0952 data: 0.0172 max mem: 5716\n","Epoch: [39362] Total time: 0:00:01 (0.0994 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0235 (0.0230) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39363] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0235 (0.0235) time: 0.2948 data: 0.2038 max mem: 5716\n","Epoch: [39363] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0233 (0.0233) time: 0.0989 data: 0.0171 max mem: 5716\n","Epoch: [39363] Total time: 0:00:01 (0.1031 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0233 (0.0233) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39364] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0250 (0.0250) time: 0.2921 data: 0.2043 max mem: 5716\n","Epoch: [39364] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0219 (0.0229) time: 0.0963 data: 0.0172 max mem: 5716\n","Epoch: [39364] Total time: 0:00:01 (0.1005 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0219 (0.0229) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39365] [ 0/12] eta: 
0:00:03 lr: 0.000000 loss: 0.0265 (0.0265) time: 0.2934 data: 0.2047 max mem: 5716\n","Epoch: [39365] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0231 (0.0241) time: 0.0948 data: 0.0172 max mem: 5716\n","Epoch: [39365] Total time: 0:00:01 (0.0989 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0231 (0.0241) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39366] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0229 (0.0229) time: 0.2880 data: 0.1976 max mem: 5716\n","Epoch: [39366] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0243 (0.0244) time: 0.0967 data: 0.0166 max mem: 5716\n","Epoch: [39366] Total time: 0:00:01 (0.1009 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0243 (0.0244) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39367] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0288 (0.0288) time: 0.2969 data: 0.2083 max mem: 5716\n","Epoch: [39367] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0239 (0.0245) time: 0.0966 data: 0.0175 max mem: 5716\n","Epoch: [39367] Total time: 0:00:01 (0.1008 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0239 (0.0245) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39368] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0257 (0.0257) time: 0.2980 data: 0.2089 max mem: 5716\n","Epoch: [39368] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0209 (0.0209) time: 0.0967 data: 0.0176 max mem: 5716\n","Epoch: [39368] Total time: 0:00:01 (0.1009 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0209 (0.0209) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39369] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0191 (0.0191) time: 0.2932 data: 0.2062 max mem: 5716\n","Epoch: [39369] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0209 (0.0209) time: 0.0955 data: 0.0173 max mem: 5716\n","Epoch: [39369] Total time: 0:00:01 (0.0996 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0209 (0.0209) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39370] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0298 (0.0298) time: 0.2908 data: 0.1945 max mem: 5716\n","Epoch: [39370] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0196 (0.0208) time: 0.0949 data: 0.0164 max mem: 5716\n","Epoch: [39370] Total time: 0:00:01 (0.0990 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0196 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39371] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0256 (0.0256) time: 0.2757 data: 0.1866 max mem: 5716\n","Epoch: [39371] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0216 (0.0228) time: 0.0943 data: 0.0157 max mem: 5716\n","Epoch: [39371] Total time: 0:00:01 (0.0983 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0216 (0.0228) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: 
/home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39372] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0235 (0.0235) time: 0.2920 data: 0.2027 max mem: 5716\n","Epoch: [39372] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0221 (0.0231) time: 0.0949 data: 0.0170 max mem: 5716\n","Epoch: [39372] Total time: 0:00:01 (0.1003 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0221 (0.0231) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39373] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0256 (0.0256) time: 0.2886 data: 0.1963 max mem: 5716\n","Epoch: [39373] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0226 (0.0229) time: 0.0962 data: 0.0165 max mem: 5716\n","Epoch: [39373] Total time: 0:00:01 (0.1003 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0226 (0.0229) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39374] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0261 (0.0261) time: 0.2934 data: 0.2035 max mem: 5716\n","Epoch: [39374] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0253 (0.0245) time: 0.0957 data: 0.0171 max mem: 5716\n","Epoch: [39374] Total time: 0:00:01 (0.0998 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0253 (0.0245) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39375] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0221 (0.0221) time: 0.2942 data: 0.2067 max mem: 5716\n","Epoch: [39375] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0237 (0.0240) time: 0.0958 data: 0.0174 max mem: 5716\n","Epoch: [39375] Total time: 0:00:01 (0.0999 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0237 (0.0240) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39376] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0227 (0.0227) time: 0.2841 data: 0.1959 max mem: 5716\n","Epoch: [39376] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0227 (0.0243) time: 0.0946 data: 0.0165 max mem: 5716\n","Epoch: [39376] Total time: 0:00:01 (0.0986 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0227 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39377] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0231 (0.0231) time: 0.2962 data: 0.2083 max mem: 5716\n","Epoch: [39377] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0215 (0.0210) time: 0.0962 data: 0.0175 max mem: 5716\n","Epoch: [39377] Total time: 0:00:01 (0.1003 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0215 (0.0210) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39378] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0308 (0.0308) time: 0.3009 data: 0.1984 max mem: 5716\n","Epoch: [39378] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0194 (0.0207) time: 0.0959 data: 0.0167 max mem: 5716\n","Epoch: [39378] Total time: 0:00:01 (0.1000 s / it)\n","Averaged stats: lr: 0.000000 loss: 
0.0194 (0.0207) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39379] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0132 (0.0132) time: 0.2944 data: 0.2063 max mem: 5716\n","Epoch: [39379] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0216 (0.0208) time: 0.0959 data: 0.0174 max mem: 5716\n","Epoch: [39379] Total time: 0:00:01 (0.1001 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0216 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39380] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0199 (0.0199) time: 0.2951 data: 0.2013 max mem: 5716\n","Epoch: [39380] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0225 (0.0232) time: 0.0959 data: 0.0169 max mem: 5716\n","Epoch: [39380] Total time: 0:00:01 (0.1001 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0225 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39381] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0255 (0.0255) time: 0.2940 data: 0.2067 max mem: 5716\n","Epoch: [39381] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0238 (0.0238) time: 0.0973 data: 0.0174 max mem: 5716\n","Epoch: [39381] Total time: 0:00:01 (0.1016 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0238 (0.0238) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39382] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0206 (0.0206) time: 0.2881 data: 0.1955 max mem: 5716\n","Epoch: [39382] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0238 (0.0231) time: 0.0946 data: 0.0164 max mem: 5716\n","Epoch: [39382] Total time: 0:00:01 (0.0987 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0238 (0.0231) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39383] [ 0/12] eta: 0:00:05 lr: 0.000000 loss: 0.0285 (0.0285) time: 0.4891 data: 0.4065 max mem: 5716\n","Epoch: [39383] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0249 (0.0246) time: 0.1111 data: 0.0340 max mem: 5716\n","Epoch: [39383] Total time: 0:00:01 (0.1152 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0249 (0.0246) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39384] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0204 (0.0204) time: 0.2843 data: 0.1968 max mem: 5716\n","Epoch: [39384] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0254 (0.0242) time: 0.0943 data: 0.0165 max mem: 5716\n","Epoch: [39384] Total time: 0:00:01 (0.0984 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0254 (0.0242) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39385] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0251 (0.0251) time: 0.2860 data: 0.1982 max mem: 5716\n","Epoch: [39385] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0229 (0.0240) time: 0.0944 data: 0.0167 max mem: 
5716\n","Epoch: [39385] Total time: 0:00:01 (0.0985 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0229 (0.0240) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39386] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0254 (0.0254) time: 0.2885 data: 0.2004 max mem: 5716\n","Epoch: [39386] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0203 (0.0209) time: 0.0949 data: 0.0168 max mem: 5716\n","Epoch: [39386] Total time: 0:00:01 (0.0991 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0203 (0.0209) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39387] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0183 (0.0183) time: 0.2876 data: 0.1983 max mem: 5716\n","Epoch: [39387] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0201 (0.0207) time: 0.0956 data: 0.0167 max mem: 5716\n","Epoch: [39387] Total time: 0:00:01 (0.0997 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0201 (0.0207) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39388] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0201 (0.0201) time: 0.2847 data: 0.1961 max mem: 5716\n","Epoch: [39388] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0201 (0.0210) time: 0.0945 data: 0.0165 max mem: 5716\n","Epoch: [39388] Total time: 0:00:01 (0.0987 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0201 (0.0210) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39389] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0255 (0.0255) time: 0.2927 data: 0.2044 max mem: 5716\n","Epoch: [39389] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0232 (0.0233) time: 0.0953 data: 0.0172 max mem: 5716\n","Epoch: [39389] Total time: 0:00:01 (0.0995 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0232 (0.0233) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39390] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0200 (0.0200) time: 0.2874 data: 0.1964 max mem: 5716\n","Epoch: [39390] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0236 (0.0232) time: 0.0962 data: 0.0165 max mem: 5716\n","Epoch: [39390] Total time: 0:00:01 (0.1004 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0236 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39391] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0242 (0.0242) time: 0.2886 data: 0.2009 max mem: 5716\n","Epoch: [39391] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0227 (0.0233) time: 0.0952 data: 0.0169 max mem: 5716\n","Epoch: [39391] Total time: 0:00:01 (0.1014 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0227 (0.0233) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39392] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0240 (0.0240) time: 0.3101 data: 0.2233 max mem: 5716\n","Epoch: 
[39392] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0240 (0.0241) time: 0.0970 data: 0.0188 max mem: 5716\n","Epoch: [39392] Total time: 0:00:01 (0.1011 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0240 (0.0241) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39393] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0264 (0.0264) time: 0.2939 data: 0.2004 max mem: 5716\n","Epoch: [39393] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0241 (0.0243) time: 0.0974 data: 0.0169 max mem: 5716\n","Epoch: [39393] Total time: 0:00:01 (0.1015 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0241 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39394] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0256 (0.0256) time: 0.2986 data: 0.1978 max mem: 5716\n","Epoch: [39394] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0238 (0.0243) time: 0.0962 data: 0.0166 max mem: 5716\n","Epoch: [39394] Total time: 0:00:01 (0.1003 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0238 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39395] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0218 (0.0218) time: 0.3086 data: 0.2095 max mem: 5716\n","Epoch: [39395] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0203 (0.0208) time: 0.0971 data: 0.0176 max mem: 5716\n","Epoch: [39395] Total time: 0:00:01 (0.1012 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0203 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39396] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0165 (0.0165) time: 0.2912 data: 0.2035 max mem: 5716\n","Epoch: [39396] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0208 (0.0207) time: 0.0957 data: 0.0171 max mem: 5716\n","Epoch: [39396] Total time: 0:00:01 (0.0998 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0208 (0.0207) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39397] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0264 (0.0264) time: 0.2861 data: 0.1952 max mem: 5716\n","Epoch: [39397] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0209 (0.0210) time: 0.0952 data: 0.0164 max mem: 5716\n","Epoch: [39397] Total time: 0:00:01 (0.0993 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0209 (0.0210) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39398] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0209 (0.0209) time: 0.2881 data: 0.1982 max mem: 5716\n","Epoch: [39398] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0216 (0.0234) time: 0.0956 data: 0.0167 max mem: 5716\n","Epoch: [39398] Total time: 0:00:01 (0.0997 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0216 (0.0234) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39399] [ 0/12] 
[Condensed training-log capture: the original span is the tail of a nohup/notebook output, truncated mid-entry at both ends, covering epochs 39399 through 39535 of the PuzzleTuning SAE run logging to /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20. Every epoch in this range behaves the same way: 12 iterations completing in about one second (~0.10 s/it, data loading ~0.017 s/it), learning rate fully decayed to 0.000000, per-epoch averaged loss between roughly 0.0205 and 0.0250, and max GPU memory steady at 5716 MB. fix_position_ratio stays at 0.25 throughout, while puzzle_patch_size cycles through 16, 32, and 112 in blocks of three consecutive epochs, and each epoch emits one Figure(640x480).]

A representative entry:

Epoch: [39400]  [ 0/12]  eta: 0:00:03  lr: 0.000000  loss: 0.0199 (0.0199)  time: 0.2898  data: 0.2029  max mem: 5716
Epoch: [39400]  [11/12]  eta: 0:00:00  lr: 0.000000  loss: 0.0235 (0.0233)  time: 0.0950  data: 0.0170  max mem: 5716
Epoch: [39400] Total time: 0:00:01 (0.0991 s / it)
Averaged stats: lr: 0.000000  loss: 0.0235 (0.0233)  fix_position_ratio: 0.25  puzzle_patch_size: 32
Figure(640x480)
log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20
0:00:03 lr: 0.000000 loss: 0.0237 (0.0237) time: 0.2918 data: 0.1983 max mem: 5716\n","Epoch: [39535] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0237 (0.0235) time: 0.0983 data: 0.0167 max mem: 5716\n","Epoch: [39535] Total time: 0:00:01 (0.1024 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0237 (0.0235) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39536] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0223 (0.0223) time: 0.2934 data: 0.2059 max mem: 5716\n","Epoch: [39536] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0228 (0.0241) time: 0.0951 data: 0.0173 max mem: 5716\n","Epoch: [39536] Total time: 0:00:01 (0.0992 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0228 (0.0241) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39537] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0209 (0.0209) time: 0.2874 data: 0.1970 max mem: 5716\n","Epoch: [39537] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0235 (0.0242) time: 0.0947 data: 0.0166 max mem: 5716\n","Epoch: [39537] Total time: 0:00:01 (0.0988 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0235 (0.0242) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39538] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0287 (0.0287) time: 0.2865 data: 0.2014 max mem: 5716\n","Epoch: [39538] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0245 (0.0243) time: 0.0942 data: 0.0170 max mem: 5716\n","Epoch: [39538] Total time: 0:00:01 (0.0983 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0245 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39539] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0243 (0.0243) time: 0.2919 data: 0.1941 max mem: 5716\n","Epoch: [39539] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0205 (0.0205) time: 0.0946 data: 0.0163 max mem: 5716\n","Epoch: [39539] Total time: 0:00:01 (0.0987 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0205 (0.0205) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39540] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0247 (0.0247) time: 0.2956 data: 0.1979 max mem: 5716\n","Epoch: [39540] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0206 (0.0205) time: 0.0957 data: 0.0166 max mem: 5716\n","Epoch: [39540] Total time: 0:00:01 (0.0999 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0206 (0.0205) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39541] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0253 (0.0253) time: 0.2842 data: 0.1939 max mem: 5716\n","Epoch: [39541] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0207 (0.0208) time: 0.0950 data: 0.0163 max mem: 5716\n","Epoch: [39541] Total time: 0:00:01 (0.0991 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0207 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: 
/home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39542] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0225 (0.0225) time: 0.2857 data: 0.1939 max mem: 5716\n","Epoch: [39542] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0234 (0.0234) time: 0.0946 data: 0.0163 max mem: 5716\n","Epoch: [39542] Total time: 0:00:01 (0.0987 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0234 (0.0234) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39543] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0234 (0.0234) time: 0.2931 data: 0.1936 max mem: 5716\n","Epoch: [39543] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0230 (0.0231) time: 0.0976 data: 0.0163 max mem: 5716\n","Epoch: [39543] Total time: 0:00:01 (0.1032 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0230 (0.0231) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39544] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0217 (0.0217) time: 0.2947 data: 0.2046 max mem: 5716\n","Epoch: [39544] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0247 (0.0236) time: 0.0971 data: 0.0172 max mem: 5716\n","Epoch: [39544] Total time: 0:00:01 (0.1012 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0247 (0.0236) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39545] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0246 (0.0246) time: 0.2965 data: 0.2048 max mem: 5716\n","Epoch: [39545] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0239 (0.0245) time: 0.0956 data: 0.0172 max mem: 5716\n","Epoch: [39545] Total time: 0:00:01 (0.0997 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0239 (0.0245) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39546] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0269 (0.0269) time: 0.2847 data: 0.1973 max mem: 5716\n","Epoch: [39546] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0225 (0.0244) time: 0.0944 data: 0.0166 max mem: 5716\n","Epoch: [39546] Total time: 0:00:01 (0.0984 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0225 (0.0244) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39547] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0257 (0.0257) time: 0.2887 data: 0.1989 max mem: 5716\n","Epoch: [39547] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0236 (0.0244) time: 0.0947 data: 0.0167 max mem: 5716\n","Epoch: [39547] Total time: 0:00:01 (0.0988 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0236 (0.0244) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39548] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0177 (0.0177) time: 0.2890 data: 0.1985 max mem: 5716\n","Epoch: [39548] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0211 (0.0209) time: 0.0955 data: 0.0167 max mem: 5716\n","Epoch: [39548] Total time: 0:00:01 (0.0997 s / it)\n","Averaged stats: lr: 0.000000 loss: 
0.0211 (0.0209) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39549] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0216 (0.0216) time: 0.2919 data: 0.2045 max mem: 5716\n","Epoch: [39549] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0208 (0.0211) time: 0.0955 data: 0.0172 max mem: 5716\n","Epoch: [39549] Total time: 0:00:01 (0.0996 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0208 (0.0211) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39550] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0237 (0.0237) time: 0.2953 data: 0.2077 max mem: 5716\n","Epoch: [39550] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0198 (0.0207) time: 0.0961 data: 0.0175 max mem: 5716\n","Epoch: [39550] Total time: 0:00:01 (0.1002 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0198 (0.0207) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39551] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0239 (0.0239) time: 0.2908 data: 0.2007 max mem: 5716\n","Epoch: [39551] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0232 (0.0238) time: 0.0956 data: 0.0169 max mem: 5716\n","Epoch: [39551] Total time: 0:00:01 (0.0998 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0232 (0.0238) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39552] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0270 (0.0270) time: 0.2869 data: 0.1977 max mem: 5716\n","Epoch: [39552] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0238 (0.0230) time: 0.0947 data: 0.0166 max mem: 5716\n","Epoch: [39552] Total time: 0:00:01 (0.0989 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0238 (0.0230) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39553] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0227 (0.0227) time: 0.2883 data: 0.1990 max mem: 5716\n","Epoch: [39553] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0238 (0.0235) time: 0.0961 data: 0.0167 max mem: 5716\n","Epoch: [39553] Total time: 0:00:01 (0.1001 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0238 (0.0235) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39554] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0248 (0.0248) time: 0.2896 data: 0.2017 max mem: 5716\n","Epoch: [39554] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0246 (0.0245) time: 0.0948 data: 0.0169 max mem: 5716\n","Epoch: [39554] Total time: 0:00:01 (0.0990 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0246 (0.0245) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39555] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0245 (0.0245) time: 0.2840 data: 0.1902 max mem: 5716\n","Epoch: [39555] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0245 (0.0244) time: 0.0975 data: 0.0160 max mem: 
5716\n","Epoch: [39555] Total time: 0:00:01 (0.1016 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0245 (0.0244) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39556] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0206 (0.0206) time: 0.2838 data: 0.1915 max mem: 5716\n","Epoch: [39556] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0241 (0.0243) time: 0.0953 data: 0.0161 max mem: 5716\n","Epoch: [39556] Total time: 0:00:01 (0.0994 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0241 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39557] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0187 (0.0187) time: 0.2873 data: 0.1947 max mem: 5716\n","Epoch: [39557] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0200 (0.0207) time: 0.0960 data: 0.0164 max mem: 5716\n","Epoch: [39557] Total time: 0:00:01 (0.1002 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0200 (0.0207) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39558] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0212 (0.0212) time: 0.2774 data: 0.1874 max mem: 5716\n","Epoch: [39558] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0198 (0.0206) time: 0.0936 data: 0.0158 max mem: 5716\n","Epoch: [39558] Total time: 0:00:01 (0.0977 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0198 (0.0206) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39559] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0205 (0.0205) time: 0.2915 data: 0.2058 max mem: 5716\n","Epoch: [39559] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0205 (0.0207) time: 0.0953 data: 0.0173 max mem: 5716\n","Epoch: [39559] Total time: 0:00:01 (0.0994 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0205 (0.0207) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39560] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0248 (0.0248) time: 0.2922 data: 0.2033 max mem: 5716\n","Epoch: [39560] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0234 (0.0236) time: 0.0961 data: 0.0172 max mem: 5716\n","Epoch: [39560] Total time: 0:00:01 (0.1004 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0234 (0.0236) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39561] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0216 (0.0216) time: 0.2834 data: 0.1949 max mem: 5716\n","Epoch: [39561] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0225 (0.0232) time: 0.0949 data: 0.0164 max mem: 5716\n","Epoch: [39561] Total time: 0:00:01 (0.0990 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0225 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39562] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0234 (0.0234) time: 0.2949 data: 0.2072 max mem: 5716\n","Epoch: 
[39562] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0228 (0.0232) time: 0.0976 data: 0.0174 max mem: 5716\n","Epoch: [39562] Total time: 0:00:01 (0.1018 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0228 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39563] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0262 (0.0262) time: 0.2903 data: 0.2029 max mem: 5716\n","Epoch: [39563] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0236 (0.0244) time: 0.0951 data: 0.0171 max mem: 5716\n","Epoch: [39563] Total time: 0:00:01 (0.0992 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0236 (0.0244) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39564] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0253 (0.0253) time: 0.2883 data: 0.1937 max mem: 5716\n","Epoch: [39564] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0234 (0.0239) time: 0.0973 data: 0.0163 max mem: 5716\n","Epoch: [39564] Total time: 0:00:01 (0.1015 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0234 (0.0239) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39565] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0256 (0.0256) time: 0.2916 data: 0.1902 max mem: 5716\n","Epoch: [39565] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0239 (0.0241) time: 0.0964 data: 0.0160 max mem: 5716\n","Epoch: [39565] Total time: 0:00:01 (0.1005 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0239 (0.0241) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39566] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0206 (0.0206) time: 0.2875 data: 0.2012 max mem: 5716\n","Epoch: [39566] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0207 (0.0207) time: 0.0952 data: 0.0169 max mem: 5716\n","Epoch: [39566] Total time: 0:00:01 (0.0994 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0207 (0.0207) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39567] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0208 (0.0208) time: 0.2914 data: 0.2013 max mem: 5716\n","Epoch: [39567] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0208 (0.0208) time: 0.0954 data: 0.0169 max mem: 5716\n","Epoch: [39567] Total time: 0:00:01 (0.0996 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0208 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39568] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0187 (0.0187) time: 0.2876 data: 0.1984 max mem: 5716\n","Epoch: [39568] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0201 (0.0211) time: 0.0955 data: 0.0167 max mem: 5716\n","Epoch: [39568] Total time: 0:00:01 (0.0997 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0201 (0.0211) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39569] [ 0/12] 
eta: 0:00:06 lr: 0.000000 loss: 0.0230 (0.0230) time: 0.5141 data: 0.4305 max mem: 5716\n","Epoch: [39569] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0222 (0.0232) time: 0.1147 data: 0.0360 max mem: 5716\n","Epoch: [39569] Total time: 0:00:01 (0.1187 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0222 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39570] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0233 (0.0233) time: 0.2943 data: 0.1944 max mem: 5716\n","Epoch: [39570] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0233 (0.0235) time: 0.0959 data: 0.0164 max mem: 5716\n","Epoch: [39570] Total time: 0:00:01 (0.1000 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0233 (0.0235) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39571] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0243 (0.0243) time: 0.2841 data: 0.1961 max mem: 5716\n","Epoch: [39571] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0242 (0.0232) time: 0.0943 data: 0.0165 max mem: 5716\n","Epoch: [39571] Total time: 0:00:01 (0.0983 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0242 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39572] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0276 (0.0276) time: 0.2859 data: 0.1976 max mem: 5716\n","Epoch: [39572] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0238 (0.0243) time: 0.0941 data: 0.0166 max mem: 5716\n","Epoch: [39572] Total time: 0:00:01 (0.0983 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0238 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39573] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0262 (0.0262) time: 0.2899 data: 0.2045 max mem: 5716\n","Epoch: [39573] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0233 (0.0243) time: 0.0947 data: 0.0172 max mem: 5716\n","Epoch: [39573] Total time: 0:00:01 (0.0988 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0233 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39574] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0204 (0.0204) time: 0.2975 data: 0.2106 max mem: 5716\n","Epoch: [39574] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0251 (0.0245) time: 0.0960 data: 0.0177 max mem: 5716\n","Epoch: [39574] Total time: 0:00:01 (0.1002 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0251 (0.0245) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39575] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0185 (0.0185) time: 0.2838 data: 0.1953 max mem: 5716\n","Epoch: [39575] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0199 (0.0212) time: 0.0942 data: 0.0164 max mem: 5716\n","Epoch: [39575] Total time: 0:00:01 (0.0985 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0199 (0.0212) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: 
/home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39576] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0198 (0.0198) time: 0.2831 data: 0.1942 max mem: 5716\n","Epoch: [39576] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0196 (0.0204) time: 0.0945 data: 0.0163 max mem: 5716\n","Epoch: [39576] Total time: 0:00:01 (0.0987 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0196 (0.0204) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39577] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0169 (0.0169) time: 0.2943 data: 0.1956 max mem: 5716\n","Epoch: [39577] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0190 (0.0208) time: 0.0952 data: 0.0164 max mem: 5716\n","Epoch: [39577] Total time: 0:00:01 (0.0993 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0190 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39578] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0214 (0.0214) time: 0.3043 data: 0.2075 max mem: 5716\n","Epoch: [39578] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0231 (0.0233) time: 0.0995 data: 0.0174 max mem: 5716\n","Epoch: [39578] Total time: 0:00:01 (0.1036 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0231 (0.0233) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39579] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0219 (0.0219) time: 0.2877 data: 0.1989 max mem: 5716\n","Epoch: [39579] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0223 (0.0231) time: 0.0950 data: 0.0167 max mem: 5716\n","Epoch: [39579] Total time: 0:00:01 (0.0991 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0223 (0.0231) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39580] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0247 (0.0247) time: 0.2950 data: 0.2062 max mem: 5716\n","Epoch: [39580] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0243 (0.0235) time: 0.0962 data: 0.0174 max mem: 5716\n","Epoch: [39580] Total time: 0:00:01 (0.1003 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0243 (0.0235) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39581] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0270 (0.0270) time: 0.2921 data: 0.2024 max mem: 5716\n","Epoch: [39581] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0237 (0.0240) time: 0.0969 data: 0.0170 max mem: 5716\n","Epoch: [39581] Total time: 0:00:01 (0.1010 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0237 (0.0240) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39582] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0271 (0.0271) time: 0.3071 data: 0.2090 max mem: 5716\n","Epoch: [39582] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0234 (0.0243) time: 0.0965 data: 0.0176 max mem: 5716\n","Epoch: [39582] Total time: 0:00:01 (0.1007 s / it)\n","Averaged stats: lr: 0.000000 loss: 
0.0234 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39583] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0248 (0.0248) time: 0.2890 data: 0.1989 max mem: 5716\n","Epoch: [39583] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0243 (0.0245) time: 0.0951 data: 0.0167 max mem: 5716\n","Epoch: [39583] Total time: 0:00:01 (0.0992 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0243 (0.0245) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39584] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0196 (0.0196) time: 0.2842 data: 0.1917 max mem: 5716\n","Epoch: [39584] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0191 (0.0207) time: 0.0977 data: 0.0161 max mem: 5716\n","Epoch: [39584] Total time: 0:00:01 (0.1018 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0191 (0.0207) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39585] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0224 (0.0224) time: 0.2832 data: 0.1941 max mem: 5716\n","Epoch: [39585] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0202 (0.0210) time: 0.0948 data: 0.0163 max mem: 5716\n","Epoch: [39585] Total time: 0:00:01 (0.0989 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0202 (0.0210) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39586] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0197 (0.0197) time: 0.2905 data: 0.2014 max mem: 5716\n","Epoch: [39586] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0198 (0.0207) time: 0.0952 data: 0.0169 max mem: 5716\n","Epoch: [39586] Total time: 0:00:01 (0.0993 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0198 (0.0207) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39587] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0241 (0.0241) time: 0.2911 data: 0.2027 max mem: 5716\n","Epoch: [39587] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0218 (0.0232) time: 0.0956 data: 0.0171 max mem: 5716\n","Epoch: [39587] Total time: 0:00:01 (0.0996 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0218 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39588] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0241 (0.0241) time: 0.2816 data: 0.1909 max mem: 5716\n","Epoch: [39588] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0231 (0.0232) time: 0.0943 data: 0.0161 max mem: 5716\n","Epoch: [39588] Total time: 0:00:01 (0.0983 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0231 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39589] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0244 (0.0244) time: 0.2797 data: 0.1893 max mem: 5716\n","Epoch: [39589] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0230 (0.0233) time: 0.0941 data: 0.0159 max mem: 
5716\n","Epoch: [39589] Total time: 0:00:01 (0.0981 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0230 (0.0233) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39590] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0282 (0.0282) time: 0.2860 data: 0.1960 max mem: 5716\n","Epoch: [39590] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0247 (0.0247) time: 0.0950 data: 0.0165 max mem: 5716\n","Epoch: [39590] Total time: 0:00:01 (0.0990 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0247 (0.0247) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39591] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0233 (0.0233) time: 0.3078 data: 0.1974 max mem: 5716\n","Epoch: [39591] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0233 (0.0241) time: 0.0962 data: 0.0166 max mem: 5716\n","Epoch: [39591] Total time: 0:00:01 (0.1003 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0233 (0.0241) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39592] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0244 (0.0244) time: 0.2874 data: 0.1983 max mem: 5716\n","Epoch: [39592] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0242 (0.0240) time: 0.0953 data: 0.0167 max mem: 5716\n","Epoch: [39592] Total time: 0:00:01 (0.0995 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0242 (0.0240) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39593] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0245 (0.0245) time: 0.2864 data: 0.1978 max mem: 5716\n","Epoch: [39593] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0185 (0.0204) time: 0.0950 data: 0.0166 max mem: 5716\n","Epoch: [39593] Total time: 0:00:01 (0.0992 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0185 (0.0204) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39594] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0203 (0.0203) time: 0.2904 data: 0.1999 max mem: 5716\n","Epoch: [39594] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0203 (0.0208) time: 0.0969 data: 0.0168 max mem: 5716\n","Epoch: [39594] Total time: 0:00:01 (0.1010 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0203 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39595] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0265 (0.0265) time: 0.2871 data: 0.1958 max mem: 5716\n","Epoch: [39595] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0209 (0.0208) time: 0.0955 data: 0.0165 max mem: 5716\n","Epoch: [39595] Total time: 0:00:01 (0.0997 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0209 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39596] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0241 (0.0241) time: 0.2910 data: 0.2016 max mem: 5716\n","Epoch: 
[39596] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0237 (0.0233) time: 0.0953 data: 0.0169 max mem: 5716\n","Epoch: [39596] Total time: 0:00:01 (0.0994 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0237 (0.0233) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39597] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0270 (0.0270) time: 0.2857 data: 0.1940 max mem: 5716\n","Epoch: [39597] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0226 (0.0231) time: 0.0946 data: 0.0163 max mem: 5716\n","Epoch: [39597] Total time: 0:00:01 (0.0987 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0226 (0.0231) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39598] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0198 (0.0198) time: 0.2991 data: 0.2087 max mem: 5716\n","Epoch: [39598] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0229 (0.0233) time: 0.0964 data: 0.0176 max mem: 5716\n","Epoch: [39598] Total time: 0:00:01 (0.1005 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0229 (0.0233) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39599] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0209 (0.0209) time: 0.2880 data: 0.2009 max mem: 5716\n","Epoch: [39599] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0238 (0.0243) time: 0.0945 data: 0.0169 max mem: 5716\n","Epoch: [39599] Total time: 0:00:01 (0.0986 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0238 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39600] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0286 (0.0286) time: 0.2891 data: 0.1986 max mem: 5716\n","Epoch: [39600] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0236 (0.0245) time: 0.0949 data: 0.0167 max mem: 5716\n","Epoch: [39600] Total time: 0:00:01 (0.0990 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0236 (0.0245) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39601] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0267 (0.0267) time: 0.2831 data: 0.1938 max mem: 5716\n","Epoch: [39601] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0244 (0.0244) time: 0.0956 data: 0.0163 max mem: 5716\n","Epoch: [39601] Total time: 0:00:01 (0.0998 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0244 (0.0244) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39602] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0204 (0.0204) time: 0.2934 data: 0.2032 max mem: 5716\n","Epoch: [39602] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0214 (0.0209) time: 0.0958 data: 0.0171 max mem: 5716\n","Epoch: [39602] Total time: 0:00:01 (0.0999 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0214 (0.0209) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39603] [ 0/12] 
eta: 0:00:03 lr: 0.000000 loss: 0.0198 (0.0198) time: 0.2834 data: 0.1949 max mem: 5716\n","Epoch: [39603] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0198 (0.0206) time: 0.0944 data: 0.0164 max mem: 5716\n","Epoch: [39603] Total time: 0:00:01 (0.0985 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0198 (0.0206) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39604] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0172 (0.0172) time: 0.2916 data: 0.2015 max mem: 5716\n","Epoch: [39604] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0199 (0.0209) time: 0.0980 data: 0.0169 max mem: 5716\n","Epoch: [39604] Total time: 0:00:01 (0.1022 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0199 (0.0209) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39605] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0197 (0.0197) time: 0.2956 data: 0.1988 max mem: 5716\n","Epoch: [39605] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0232 (0.0231) time: 0.0960 data: 0.0167 max mem: 5716\n","Epoch: [39605] Total time: 0:00:01 (0.1002 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0232 (0.0231) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39606] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0261 (0.0261) time: 0.3007 data: 0.2129 max mem: 5716\n","Epoch: [39606] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0234 (0.0234) time: 0.0966 data: 0.0179 max mem: 5716\n","Epoch: [39606] Total time: 0:00:01 (0.1008 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0234 (0.0234) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39607] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0219 (0.0219) time: 0.2966 data: 0.2029 max mem: 5716\n","Epoch: [39607] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0219 (0.0234) time: 0.0957 data: 0.0171 max mem: 5716\n","Epoch: [39607] Total time: 0:00:01 (0.0999 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0219 (0.0234) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39608] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0273 (0.0273) time: 0.2901 data: 0.2032 max mem: 5716\n","Epoch: [39608] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0238 (0.0239) time: 0.0950 data: 0.0171 max mem: 5716\n","Epoch: [39608] Total time: 0:00:01 (0.0992 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0238 (0.0239) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39609] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0259 (0.0259) time: 0.2975 data: 0.2095 max mem: 5716\n","Epoch: [39609] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0236 (0.0244) time: 0.0954 data: 0.0176 max mem: 5716\n","Epoch: [39609] Total time: 0:00:01 (0.0996 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0236 (0.0244) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: 
/home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39610] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0271 (0.0271) time: 0.2911 data: 0.2041 max mem: 5716\n","Epoch: [39610] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0242 (0.0247) time: 0.0960 data: 0.0172 max mem: 5716\n","Epoch: [39610] Total time: 0:00:01 (0.1001 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0242 (0.0247) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39611] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0240 (0.0240) time: 0.2872 data: 0.1974 max mem: 5716\n","Epoch: [39611] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0201 (0.0206) time: 0.0953 data: 0.0166 max mem: 5716\n","Epoch: [39611] Total time: 0:00:01 (0.0997 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0201 (0.0206) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39612] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0184 (0.0184) time: 0.2899 data: 0.1986 max mem: 5716\n","Epoch: [39612] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0196 (0.0206) time: 0.0955 data: 0.0167 max mem: 5716\n","Epoch: [39612] Total time: 0:00:01 (0.0997 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0196 (0.0206) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39613] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0230 (0.0230) time: 0.2878 data: 0.1984 max mem: 5716\n","Epoch: [39613] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0217 (0.0207) time: 0.0957 data: 0.0167 max mem: 5716\n","Epoch: [39613] Total time: 0:00:01 (0.0998 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0217 (0.0207) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39614] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0250 (0.0250) time: 0.2950 data: 0.2085 max mem: 5716\n","Epoch: [39614] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0222 (0.0232) time: 0.0951 data: 0.0175 max mem: 5716\n","Epoch: [39614] Total time: 0:00:01 (0.0993 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0222 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39615] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0260 (0.0260) time: 0.2852 data: 0.1964 max mem: 5716\n","Epoch: [39615] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0240 (0.0235) time: 0.0946 data: 0.0165 max mem: 5716\n","Epoch: [39615] Total time: 0:00:01 (0.0987 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0240 (0.0235) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39616] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0234 (0.0234) time: 0.2976 data: 0.2105 max mem: 5716\n","Epoch: [39616] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0233 (0.0231) time: 0.0958 data: 0.0177 max mem: 5716\n","Epoch: [39616] Total time: 0:00:01 (0.1000 s / it)\n","Averaged stats: lr: 0.000000 loss: 
0.0233 (0.0231) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39617] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0255 (0.0255) time: 0.2972 data: 0.1945 max mem: 5716\n","Epoch: [39617] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0237 (0.0243) time: 0.0959 data: 0.0164 max mem: 5716\n","Epoch: [39617] Total time: 0:00:01 (0.1001 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0237 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39618] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0233 (0.0233) time: 0.2837 data: 0.1935 max mem: 5716\n","Epoch: [39618] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0237 (0.0240) time: 0.0956 data: 0.0163 max mem: 5716\n","Epoch: [39618] Total time: 0:00:01 (0.0998 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0237 (0.0240) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39619] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0242 (0.0242) time: 0.2912 data: 0.1983 max mem: 5716\n","Epoch: [39619] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0234 (0.0246) time: 0.0960 data: 0.0167 max mem: 5716\n","Epoch: [39619] Total time: 0:00:01 (0.1002 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0234 (0.0246) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39620] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0295 (0.0295) time: 0.3007 data: 0.2130 max mem: 5716\n","Epoch: [39620] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0202 (0.0210) time: 0.0969 data: 0.0179 max mem: 5716\n","Epoch: [39620] Total time: 0:00:01 (0.1014 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0202 (0.0210) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39621] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0230 (0.0230) time: 0.2875 data: 0.1997 max mem: 5716\n","Epoch: [39621] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0196 (0.0207) time: 0.0967 data: 0.0168 max mem: 5716\n","Epoch: [39621] Total time: 0:00:01 (0.1011 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0196 (0.0207) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39622] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0261 (0.0261) time: 0.2829 data: 0.1915 max mem: 5716\n","Epoch: [39622] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0196 (0.0207) time: 0.0947 data: 0.0161 max mem: 5716\n","Epoch: [39622] Total time: 0:00:01 (0.0989 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0196 (0.0207) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39623] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0208 (0.0208) time: 0.2801 data: 0.1887 max mem: 5716\n","Epoch: [39623] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0223 (0.0232) time: 0.0948 data: 0.0159 max mem: 
5716\n","Epoch: [39623] Total time: 0:00:01 (0.0988 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0223 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39624] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0253 (0.0253) time: 0.3055 data: 0.2045 max mem: 5716\n","Epoch: [39624] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0225 (0.0233) time: 0.0959 data: 0.0172 max mem: 5716\n","Epoch: [39624] Total time: 0:00:01 (0.1000 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0225 (0.0233) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39625] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0207 (0.0207) time: 0.2877 data: 0.2003 max mem: 5716\n","Epoch: [39625] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0230 (0.0232) time: 0.0950 data: 0.0168 max mem: 5716\n","Epoch: [39625] Total time: 0:00:01 (0.0991 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0230 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39626] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0244 (0.0244) time: 0.2902 data: 0.2026 max mem: 5716\n","Epoch: [39626] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0238 (0.0242) time: 0.0992 data: 0.0171 max mem: 5716\n","Epoch: [39626] Total time: 0:00:01 (0.1033 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0238 (0.0242) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39627] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0207 (0.0207) time: 0.2883 data: 0.1990 max mem: 5716\n","Epoch: [39627] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0241 (0.0243) time: 0.0950 data: 0.0167 max mem: 5716\n","Epoch: [39627] Total time: 0:00:01 (0.0992 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0241 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39628] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0243 (0.0243) time: 0.2917 data: 0.1990 max mem: 5716\n","Epoch: [39628] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0251 (0.0242) time: 0.0982 data: 0.0167 max mem: 5716\n","Epoch: [39628] Total time: 0:00:01 (0.1024 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0251 (0.0242) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39629] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0182 (0.0182) time: 0.2978 data: 0.2100 max mem: 5716\n","Epoch: [39629] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0208 (0.0209) time: 0.0963 data: 0.0177 max mem: 5716\n","Epoch: [39629] Total time: 0:00:01 (0.1004 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0208 (0.0209) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39630] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0221 (0.0221) time: 0.2961 data: 0.2082 max mem: 5716\n","Epoch: 
[Condensed training log, epochs 39630 through 39773; the per-epoch entries were near-identical and are summarized here rather than repeated verbatim.]

log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20

Each epoch runs 12 iterations in about 1 s total (~0.10 s/it, data loading ~0.017 s/it, max mem: 5716 MB), with the learning rate fully decayed to 0.000000. fix_position_ratio is held at 0.25 throughout, while puzzle_patch_size cycles 16 -> 32 -> 112, three epochs per size. The averaged loss is flat across this range: roughly 0.0204-0.0211 at patch size 16, 0.0228-0.0235 at 32, and 0.0238-0.0245 at 112; per-iteration losses vary between about 0.015 and 0.033. A Figure(640x480) visualization is emitted after every epoch. The log continues past epoch 39773 in the same pattern.
eta: 0:00:03 lr: 0.000000 loss: 0.0195 (0.0195) time: 0.2903 data: 0.1997 max mem: 5716\n","Epoch: [39773] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0202 (0.0209) time: 0.0954 data: 0.0168 max mem: 5716\n","Epoch: [39773] Total time: 0:00:01 (0.0995 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0202 (0.0209) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39774] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0195 (0.0195) time: 0.2829 data: 0.1906 max mem: 5716\n","Epoch: [39774] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0210 (0.0209) time: 0.0947 data: 0.0160 max mem: 5716\n","Epoch: [39774] Total time: 0:00:01 (0.0987 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0210 (0.0209) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39775] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0191 (0.0191) time: 0.2815 data: 0.1891 max mem: 5716\n","Epoch: [39775] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0200 (0.0211) time: 0.0972 data: 0.0159 max mem: 5716\n","Epoch: [39775] Total time: 0:00:01 (0.1013 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0200 (0.0211) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39776] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0232 (0.0232) time: 0.2881 data: 0.2005 max mem: 5716\n","Epoch: [39776] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0226 (0.0230) time: 0.0948 data: 0.0169 max mem: 5716\n","Epoch: [39776] Total time: 0:00:01 (0.1004 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0226 (0.0230) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39777] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0186 (0.0186) time: 0.2877 data: 0.1999 max mem: 5716\n","Epoch: [39777] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0231 (0.0234) time: 0.0947 data: 0.0168 max mem: 5716\n","Epoch: [39777] Total time: 0:00:01 (0.0988 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0231 (0.0234) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39778] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0242 (0.0242) time: 0.2887 data: 0.1970 max mem: 5716\n","Epoch: [39778] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0240 (0.0230) time: 0.0951 data: 0.0166 max mem: 5716\n","Epoch: [39778] Total time: 0:00:01 (0.0992 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0240 (0.0230) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39779] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0282 (0.0282) time: 0.2965 data: 0.2087 max mem: 5716\n","Epoch: [39779] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0235 (0.0239) time: 0.0960 data: 0.0176 max mem: 5716\n","Epoch: [39779] Total time: 0:00:01 (0.1001 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0235 (0.0239) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: 
/home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39780] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0191 (0.0191) time: 0.2951 data: 0.2076 max mem: 5716\n","Epoch: [39780] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0239 (0.0244) time: 0.0958 data: 0.0175 max mem: 5716\n","Epoch: [39780] Total time: 0:00:01 (0.1000 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0239 (0.0244) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39781] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0235 (0.0235) time: 0.2925 data: 0.1934 max mem: 5716\n","Epoch: [39781] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0242 (0.0244) time: 0.0955 data: 0.0163 max mem: 5716\n","Epoch: [39781] Total time: 0:00:01 (0.0996 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0242 (0.0244) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39782] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0155 (0.0155) time: 0.2866 data: 0.1912 max mem: 5716\n","Epoch: [39782] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0201 (0.0208) time: 0.0950 data: 0.0161 max mem: 5716\n","Epoch: [39782] Total time: 0:00:01 (0.0991 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0201 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39783] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0192 (0.0192) time: 0.2842 data: 0.1911 max mem: 5716\n","Epoch: [39783] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0197 (0.0210) time: 0.0972 data: 0.0161 max mem: 5716\n","Epoch: [39783] Total time: 0:00:01 (0.1013 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0197 (0.0210) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39784] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0186 (0.0186) time: 0.2869 data: 0.1986 max mem: 5716\n","Epoch: [39784] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0201 (0.0207) time: 0.0955 data: 0.0167 max mem: 5716\n","Epoch: [39784] Total time: 0:00:01 (0.0997 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0201 (0.0207) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39785] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0260 (0.0260) time: 0.2774 data: 0.1880 max mem: 5716\n","Epoch: [39785] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0245 (0.0237) time: 0.0937 data: 0.0158 max mem: 5716\n","Epoch: [39785] Total time: 0:00:01 (0.0978 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0245 (0.0237) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39786] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0257 (0.0257) time: 0.2872 data: 0.1961 max mem: 5716\n","Epoch: [39786] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0231 (0.0229) time: 0.0946 data: 0.0165 max mem: 5716\n","Epoch: [39786] Total time: 0:00:01 (0.0989 s / it)\n","Averaged stats: lr: 0.000000 loss: 
0.0231 (0.0229) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39787] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0221 (0.0221) time: 0.2896 data: 0.1987 max mem: 5716\n","Epoch: [39787] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0229 (0.0235) time: 0.0950 data: 0.0167 max mem: 5716\n","Epoch: [39787] Total time: 0:00:01 (0.0992 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0229 (0.0235) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39788] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0210 (0.0210) time: 0.2857 data: 0.1972 max mem: 5716\n","Epoch: [39788] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0235 (0.0242) time: 0.0946 data: 0.0166 max mem: 5716\n","Epoch: [39788] Total time: 0:00:01 (0.0986 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0235 (0.0242) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39789] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0205 (0.0205) time: 0.2980 data: 0.1985 max mem: 5716\n","Epoch: [39789] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0231 (0.0240) time: 0.0962 data: 0.0167 max mem: 5716\n","Epoch: [39789] Total time: 0:00:01 (0.1019 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0231 (0.0240) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39790] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0223 (0.0223) time: 0.2838 data: 0.1941 max mem: 5716\n","Epoch: [39790] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0231 (0.0244) time: 0.0962 data: 0.0163 max mem: 5716\n","Epoch: [39790] Total time: 0:00:01 (0.1004 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0231 (0.0244) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39791] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0232 (0.0232) time: 0.2992 data: 0.2031 max mem: 5716\n","Epoch: [39791] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0200 (0.0209) time: 0.0958 data: 0.0171 max mem: 5716\n","Epoch: [39791] Total time: 0:00:01 (0.0999 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0200 (0.0209) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39792] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0289 (0.0289) time: 0.2848 data: 0.1952 max mem: 5716\n","Epoch: [39792] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0201 (0.0210) time: 0.0949 data: 0.0164 max mem: 5716\n","Epoch: [39792] Total time: 0:00:01 (0.0990 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0201 (0.0210) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39793] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0161 (0.0161) time: 0.2850 data: 0.1947 max mem: 5716\n","Epoch: [39793] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0206 (0.0209) time: 0.0940 data: 0.0164 max mem: 
5716\n","Epoch: [39793] Total time: 0:00:01 (0.0981 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0206 (0.0209) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39794] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0241 (0.0241) time: 0.2865 data: 0.2002 max mem: 5716\n","Epoch: [39794] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0232 (0.0233) time: 0.0944 data: 0.0168 max mem: 5716\n","Epoch: [39794] Total time: 0:00:01 (0.0985 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0232 (0.0233) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39795] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0245 (0.0245) time: 0.2807 data: 0.1899 max mem: 5716\n","Epoch: [39795] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0228 (0.0230) time: 0.0941 data: 0.0160 max mem: 5716\n","Epoch: [39795] Total time: 0:00:01 (0.0981 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0228 (0.0230) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39796] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0225 (0.0225) time: 0.2820 data: 0.1912 max mem: 5716\n","Epoch: [39796] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0226 (0.0232) time: 0.0940 data: 0.0161 max mem: 5716\n","Epoch: [39796] Total time: 0:00:01 (0.0980 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0226 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39797] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0272 (0.0272) time: 0.2777 data: 0.1879 max mem: 5716\n","Epoch: [39797] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0235 (0.0244) time: 0.0942 data: 0.0158 max mem: 5716\n","Epoch: [39797] Total time: 0:00:01 (0.0982 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0235 (0.0244) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39798] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0237 (0.0237) time: 0.2971 data: 0.2022 max mem: 5716\n","Epoch: [39798] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0237 (0.0242) time: 0.0959 data: 0.0170 max mem: 5716\n","Epoch: [39798] Total time: 0:00:01 (0.1000 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0237 (0.0242) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39799] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0246 (0.0246) time: 0.2922 data: 0.2036 max mem: 5716\n","Epoch: [39799] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0243 (0.0241) time: 0.0961 data: 0.0171 max mem: 5716\n","Epoch: [39799] Total time: 0:00:01 (0.1002 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0243 (0.0241) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39800] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0218 (0.0218) time: 0.2885 data: 0.1960 max mem: 5716\n","Epoch: 
[39800] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0218 (0.0212) time: 0.0977 data: 0.0165 max mem: 5716\n","Epoch: [39800] Total time: 0:00:01 (0.1018 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0218 (0.0212) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39801] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0177 (0.0177) time: 0.3013 data: 0.2003 max mem: 5716\n","Epoch: [39801] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0205 (0.0206) time: 0.0970 data: 0.0169 max mem: 5716\n","Epoch: [39801] Total time: 0:00:01 (0.1011 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0205 (0.0206) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39802] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0227 (0.0227) time: 0.2892 data: 0.1984 max mem: 5716\n","Epoch: [39802] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0199 (0.0205) time: 0.0953 data: 0.0167 max mem: 5716\n","Epoch: [39802] Total time: 0:00:01 (0.0994 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0199 (0.0205) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39803] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0213 (0.0213) time: 0.2857 data: 0.1972 max mem: 5716\n","Epoch: [39803] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0227 (0.0232) time: 0.0949 data: 0.0166 max mem: 5716\n","Epoch: [39803] Total time: 0:00:01 (0.0990 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0227 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39804] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0243 (0.0243) time: 0.2899 data: 0.2018 max mem: 5716\n","Epoch: [39804] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0229 (0.0232) time: 0.0964 data: 0.0170 max mem: 5716\n","Epoch: [39804] Total time: 0:00:01 (0.1004 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0229 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39805] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0179 (0.0179) time: 0.2780 data: 0.1887 max mem: 5716\n","Epoch: [39805] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0222 (0.0229) time: 0.0936 data: 0.0159 max mem: 5716\n","Epoch: [39805] Total time: 0:00:01 (0.0976 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0222 (0.0229) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39806] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0257 (0.0257) time: 0.2890 data: 0.1990 max mem: 5716\n","Epoch: [39806] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0234 (0.0246) time: 0.0956 data: 0.0167 max mem: 5716\n","Epoch: [39806] Total time: 0:00:01 (0.0996 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0234 (0.0246) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39807] [ 0/12] eta: 
0:00:03 lr: 0.000000 loss: 0.0195 (0.0195) time: 0.2845 data: 0.1915 max mem: 5716\n","Epoch: [39807] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0231 (0.0243) time: 0.0946 data: 0.0161 max mem: 5716\n","Epoch: [39807] Total time: 0:00:01 (0.0987 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0231 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39808] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0222 (0.0222) time: 0.2943 data: 0.1934 max mem: 5716\n","Epoch: [39808] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0231 (0.0244) time: 0.0966 data: 0.0163 max mem: 5716\n","Epoch: [39808] Total time: 0:00:01 (0.1008 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0231 (0.0244) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39809] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0177 (0.0177) time: 0.2984 data: 0.2041 max mem: 5716\n","Epoch: [39809] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0197 (0.0210) time: 0.0969 data: 0.0171 max mem: 5716\n","Epoch: [39809] Total time: 0:00:01 (0.1014 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0197 (0.0210) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39810] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0198 (0.0198) time: 0.2904 data: 0.1978 max mem: 5716\n","Epoch: [39810] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0198 (0.0209) time: 0.0957 data: 0.0166 max mem: 5716\n","Epoch: [39810] Total time: 0:00:01 (0.0998 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0198 (0.0209) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39811] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0205 (0.0205) time: 0.2855 data: 0.1962 max mem: 5716\n","Epoch: [39811] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0205 (0.0206) time: 0.0946 data: 0.0165 max mem: 5716\n","Epoch: [39811] Total time: 0:00:01 (0.0987 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0205 (0.0206) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39812] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0222 (0.0222) time: 0.2899 data: 0.1970 max mem: 5716\n","Epoch: [39812] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0222 (0.0233) time: 0.0953 data: 0.0166 max mem: 5716\n","Epoch: [39812] Total time: 0:00:01 (0.0994 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0222 (0.0233) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39813] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0196 (0.0196) time: 0.2868 data: 0.1956 max mem: 5716\n","Epoch: [39813] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0224 (0.0233) time: 0.0964 data: 0.0165 max mem: 5716\n","Epoch: [39813] Total time: 0:00:01 (0.1005 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0224 (0.0233) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: 
/home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39814] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0226 (0.0226) time: 0.2991 data: 0.2016 max mem: 5716\n","Epoch: [39814] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0226 (0.0231) time: 0.0960 data: 0.0169 max mem: 5716\n","Epoch: [39814] Total time: 0:00:01 (0.1003 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0226 (0.0231) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39815] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0280 (0.0280) time: 0.2861 data: 0.1956 max mem: 5716\n","Epoch: [39815] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0239 (0.0239) time: 0.0952 data: 0.0165 max mem: 5716\n","Epoch: [39815] Total time: 0:00:01 (0.0994 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0239 (0.0239) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39816] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0227 (0.0227) time: 0.2973 data: 0.2091 max mem: 5716\n","Epoch: [39816] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0246 (0.0246) time: 0.0965 data: 0.0176 max mem: 5716\n","Epoch: [39816] Total time: 0:00:01 (0.1006 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0246 (0.0246) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39817] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0251 (0.0251) time: 0.2884 data: 0.2010 max mem: 5716\n","Epoch: [39817] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0251 (0.0245) time: 0.0958 data: 0.0169 max mem: 5716\n","Epoch: [39817] Total time: 0:00:01 (0.0999 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0251 (0.0245) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39818] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0197 (0.0197) time: 0.2827 data: 0.1920 max mem: 5716\n","Epoch: [39818] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0194 (0.0206) time: 0.0948 data: 0.0162 max mem: 5716\n","Epoch: [39818] Total time: 0:00:01 (0.0989 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0194 (0.0206) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39819] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0170 (0.0170) time: 0.2806 data: 0.1899 max mem: 5716\n","Epoch: [39819] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0206 (0.0206) time: 0.0941 data: 0.0160 max mem: 5716\n","Epoch: [39819] Total time: 0:00:01 (0.0981 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0206 (0.0206) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39820] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0180 (0.0180) time: 0.2860 data: 0.1964 max mem: 5716\n","Epoch: [39820] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0204 (0.0208) time: 0.0958 data: 0.0165 max mem: 5716\n","Epoch: [39820] Total time: 0:00:01 (0.0999 s / it)\n","Averaged stats: lr: 0.000000 loss: 
0.0204 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39821] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0208 (0.0208) time: 0.2950 data: 0.2094 max mem: 5716\n","Epoch: [39821] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0221 (0.0234) time: 0.0950 data: 0.0176 max mem: 5716\n","Epoch: [39821] Total time: 0:00:01 (0.0992 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0221 (0.0234) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39822] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0250 (0.0250) time: 0.2899 data: 0.2039 max mem: 5716\n","Epoch: [39822] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0229 (0.0233) time: 0.0947 data: 0.0171 max mem: 5716\n","Epoch: [39822] Total time: 0:00:01 (0.0988 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0229 (0.0233) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39823] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0241 (0.0241) time: 0.2887 data: 0.2001 max mem: 5716\n","Epoch: [39823] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0241 (0.0236) time: 0.0949 data: 0.0168 max mem: 5716\n","Epoch: [39823] Total time: 0:00:01 (0.0990 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0241 (0.0236) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39824] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0208 (0.0208) time: 0.2910 data: 0.1973 max mem: 5716\n","Epoch: [39824] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0226 (0.0243) time: 0.0948 data: 0.0166 max mem: 5716\n","Epoch: [39824] Total time: 0:00:01 (0.0996 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0226 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39825] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0273 (0.0273) time: 0.2930 data: 0.1989 max mem: 5716\n","Epoch: [39825] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0248 (0.0246) time: 0.0979 data: 0.0167 max mem: 5716\n","Epoch: [39825] Total time: 0:00:01 (0.1020 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0248 (0.0246) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39826] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0220 (0.0220) time: 0.2873 data: 0.1956 max mem: 5716\n","Epoch: [39826] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0241 (0.0241) time: 0.0974 data: 0.0165 max mem: 5716\n","Epoch: [39826] Total time: 0:00:01 (0.1015 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0241 (0.0241) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39827] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0230 (0.0230) time: 0.2783 data: 0.1874 max mem: 5716\n","Epoch: [39827] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0203 (0.0204) time: 0.0954 data: 0.0158 max mem: 
5716\n","Epoch: [39827] Total time: 0:00:01 (0.0995 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0203 (0.0204) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39828] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0162 (0.0162) time: 0.2792 data: 0.1874 max mem: 5716\n","Epoch: [39828] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0207 (0.0210) time: 0.0948 data: 0.0158 max mem: 5716\n","Epoch: [39828] Total time: 0:00:01 (0.0988 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0207 (0.0210) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39829] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0218 (0.0218) time: 0.2861 data: 0.1959 max mem: 5716\n","Epoch: [39829] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0200 (0.0206) time: 0.0956 data: 0.0165 max mem: 5716\n","Epoch: [39829] Total time: 0:00:01 (0.0997 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0200 (0.0206) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39830] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0212 (0.0212) time: 0.2837 data: 0.1928 max mem: 5716\n","Epoch: [39830] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0226 (0.0231) time: 0.0946 data: 0.0162 max mem: 5716\n","Epoch: [39830] Total time: 0:00:01 (0.0986 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0226 (0.0231) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39831] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0200 (0.0200) time: 0.2930 data: 0.2060 max mem: 5716\n","Epoch: [39831] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0212 (0.0228) time: 0.0948 data: 0.0173 max mem: 5716\n","Epoch: [39831] Total time: 0:00:01 (0.0989 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0212 (0.0228) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39832] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0180 (0.0180) time: 0.2872 data: 0.1957 max mem: 5716\n","Epoch: [39832] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0234 (0.0231) time: 0.0947 data: 0.0164 max mem: 5716\n","Epoch: [39832] Total time: 0:00:01 (0.0987 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0234 (0.0231) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39833] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0202 (0.0202) time: 0.2830 data: 0.1941 max mem: 5716\n","Epoch: [39833] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0232 (0.0244) time: 0.0951 data: 0.0163 max mem: 5716\n","Epoch: [39833] Total time: 0:00:01 (0.0992 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0232 (0.0244) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39834] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0268 (0.0268) time: 0.2839 data: 0.1925 max mem: 5716\n","Epoch: 
[39834] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0249 (0.0243) time: 0.0950 data: 0.0162 max mem: 5716\n","Epoch: [39834] Total time: 0:00:01 (0.0991 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0249 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39835] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0232 (0.0232) time: 0.2928 data: 0.2058 max mem: 5716\n","Epoch: [39835] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0239 (0.0242) time: 0.0953 data: 0.0173 max mem: 5716\n","Epoch: [39835] Total time: 0:00:01 (0.0995 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0239 (0.0242) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39836] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0152 (0.0152) time: 0.2955 data: 0.1970 max mem: 5716\n","Epoch: [39836] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0201 (0.0208) time: 0.0959 data: 0.0166 max mem: 5716\n","Epoch: [39836] Total time: 0:00:01 (0.1001 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0201 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39837] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0172 (0.0172) time: 0.2846 data: 0.1957 max mem: 5716\n","Epoch: [39837] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0213 (0.0209) time: 0.0944 data: 0.0164 max mem: 5716\n","Epoch: [39837] Total time: 0:00:01 (0.0986 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0213 (0.0209) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39838] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0227 (0.0227) time: 0.2891 data: 0.1980 max mem: 5716\n","Epoch: [39838] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0205 (0.0209) time: 0.0949 data: 0.0166 max mem: 5716\n","Epoch: [39838] Total time: 0:00:01 (0.0991 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0205 (0.0209) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39839] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0239 (0.0239) time: 0.2862 data: 0.1964 max mem: 5716\n","Epoch: [39839] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0231 (0.0233) time: 0.0944 data: 0.0165 max mem: 5716\n","Epoch: [39839] Total time: 0:00:01 (0.0999 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0231 (0.0233) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39840] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0256 (0.0256) time: 0.2917 data: 0.2033 max mem: 5716\n","Epoch: [39840] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0237 (0.0235) time: 0.0947 data: 0.0171 max mem: 5716\n","Epoch: [39840] Total time: 0:00:01 (0.0988 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0237 (0.0235) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39841] [ 0/12] eta: 
0:00:03 lr: 0.000000 loss: 0.0259 (0.0259) time: 0.2838 data: 0.1938 max mem: 5716\n","Epoch: [39841] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0230 (0.0233) time: 0.0942 data: 0.0163 max mem: 5716\n","Epoch: [39841] Total time: 0:00:01 (0.0983 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0230 (0.0233) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39842] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0235 (0.0235) time: 0.2871 data: 0.1959 max mem: 5716\n","Epoch: [39842] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0237 (0.0242) time: 0.0949 data: 0.0165 max mem: 5716\n","Epoch: [39842] Total time: 0:00:01 (0.0991 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0237 (0.0242) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39843] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0208 (0.0208) time: 0.2857 data: 0.1974 max mem: 5716\n","Epoch: [39843] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0241 (0.0243) time: 0.0950 data: 0.0166 max mem: 5716\n","Epoch: [39843] Total time: 0:00:01 (0.0992 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0241 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39844] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0255 (0.0255) time: 0.2909 data: 0.2003 max mem: 5716\n","Epoch: [39844] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0240 (0.0242) time: 0.0964 data: 0.0169 max mem: 5716\n","Epoch: [39844] Total time: 0:00:01 (0.1006 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0240 (0.0242) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39845] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0181 (0.0181) time: 0.2961 data: 0.2082 max mem: 5716\n","Epoch: [39845] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0192 (0.0209) time: 0.0987 data: 0.0175 max mem: 5716\n","Epoch: [39845] Total time: 0:00:01 (0.1028 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0192 (0.0209) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39846] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0180 (0.0180) time: 0.2933 data: 0.2058 max mem: 5716\n","Epoch: [39846] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0209 (0.0209) time: 0.0954 data: 0.0173 max mem: 5716\n","Epoch: [39846] Total time: 0:00:01 (0.0995 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0209 (0.0209) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39847] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0235 (0.0235) time: 0.2917 data: 0.2037 max mem: 5716\n","Epoch: [39847] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0204 (0.0209) time: 0.0951 data: 0.0171 max mem: 5716\n","Epoch: [39847] Total time: 0:00:01 (0.0992 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0204 (0.0209) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: 
/home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39848] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0234 (0.0234) time: 0.2820 data: 0.1925 max mem: 5716\n","Epoch: [39848] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0226 (0.0231) time: 0.0946 data: 0.0162 max mem: 5716\n","Epoch: [39848] Total time: 0:00:01 (0.0987 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0226 (0.0231) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39849] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0226 (0.0226) time: 0.2823 data: 0.1923 max mem: 5716\n","Epoch: [39849] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0227 (0.0232) time: 0.0944 data: 0.0162 max mem: 5716\n","Epoch: [39849] Total time: 0:00:01 (0.0985 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0227 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39850] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0275 (0.0275) time: 0.2834 data: 0.1916 max mem: 5716\n","Epoch: [39850] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0227 (0.0233) time: 0.0943 data: 0.0161 max mem: 5716\n","Epoch: [39850] Total time: 0:00:01 (0.0984 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0227 (0.0233) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39851] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0245 (0.0245) time: 0.2801 data: 0.1894 max mem: 5716\n","Epoch: [39851] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0245 (0.0246) time: 0.0945 data: 0.0159 max mem: 5716\n","Epoch: [39851] Total time: 0:00:01 (0.0986 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0245 (0.0246) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39852] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0253 (0.0253) time: 0.2963 data: 0.1946 max mem: 5716\n","Epoch: [39852] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0244 (0.0241) time: 0.0954 data: 0.0164 max mem: 5716\n","Epoch: [39852] Total time: 0:00:01 (0.0996 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0244 (0.0241) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39853] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0246 (0.0246) time: 0.2910 data: 0.2034 max mem: 5716\n","Epoch: [39853] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0239 (0.0244) time: 0.0950 data: 0.0171 max mem: 5716\n","Epoch: [39853] Total time: 0:00:01 (0.0990 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0239 (0.0244) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39854] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0171 (0.0171) time: 0.2869 data: 0.1983 max mem: 5716\n","Epoch: [39854] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0210 (0.0211) time: 0.0959 data: 0.0167 max mem: 5716\n","Epoch: [39854] Total time: 0:00:01 (0.1000 s / it)\n","Averaged stats: lr: 0.000000 loss: 
0.0210 (0.0211) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39855] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0231 (0.0231) time: 0.2926 data: 0.2047 max mem: 5716\n","Epoch: [39855] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0201 (0.0206) time: 0.0950 data: 0.0172 max mem: 5716\n","Epoch: [39855] Total time: 0:00:01 (0.0990 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0201 (0.0206) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39856] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0165 (0.0165) time: 0.2875 data: 0.1987 max mem: 5716\n","Epoch: [39856] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0205 (0.0209) time: 0.0949 data: 0.0167 max mem: 5716\n","Epoch: [39856] Total time: 0:00:01 (0.0991 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0205 (0.0209) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39857] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0207 (0.0207) time: 0.2920 data: 0.2054 max mem: 5716\n","Epoch: [39857] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0208 (0.0228) time: 0.0950 data: 0.0173 max mem: 5716\n","Epoch: [39857] Total time: 0:00:01 (0.0991 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0208 (0.0228) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39858] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0230 (0.0230) time: 0.2959 data: 0.2079 max mem: 5716\n","Epoch: [39858] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0225 (0.0231) time: 0.0963 data: 0.0175 max mem: 5716\n","Epoch: [39858] Total time: 0:00:01 (0.1005 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0225 (0.0231) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39859] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0224 (0.0224) time: 0.2839 data: 0.1940 max mem: 5716\n","Epoch: [39859] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0224 (0.0232) time: 0.0950 data: 0.0163 max mem: 5716\n","Epoch: [39859] Total time: 0:00:01 (0.0992 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0224 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39860] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0239 (0.0239) time: 0.2908 data: 0.1997 max mem: 5716\n","Epoch: [39860] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0232 (0.0242) time: 0.0953 data: 0.0168 max mem: 5716\n","Epoch: [39860] Total time: 0:00:01 (0.0995 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0232 (0.0242) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39861] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0204 (0.0204) time: 0.2899 data: 0.1975 max mem: 5716\n","Epoch: [39861] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0235 (0.0244) time: 0.0991 data: 0.0166 max mem: 
5716\n","Epoch: [39861] Total time: 0:00:01 (0.1032 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0235 (0.0244) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39862] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0306 (0.0306) time: 0.2937 data: 0.2056 max mem: 5716\n","Epoch: [39862] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0241 (0.0246) time: 0.0953 data: 0.0173 max mem: 5716\n","Epoch: [39862] Total time: 0:00:01 (0.0996 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0241 (0.0246) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39863] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0221 (0.0221) time: 0.2945 data: 0.2054 max mem: 5716\n","Epoch: [39863] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0199 (0.0208) time: 0.0954 data: 0.0173 max mem: 5716\n","Epoch: [39863] Total time: 0:00:01 (0.0996 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0199 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39864] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0173 (0.0173) time: 0.2912 data: 0.1996 max mem: 5716\n","Epoch: [39864] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0211 (0.0210) time: 0.0960 data: 0.0168 max mem: 5716\n","Epoch: [39864] Total time: 0:00:01 (0.1002 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0211 (0.0210) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39865] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0188 (0.0188) time: 0.2855 data: 0.1970 max mem: 5716\n","Epoch: [39865] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0200 (0.0212) time: 0.0942 data: 0.0166 max mem: 5716\n","Epoch: [39865] Total time: 0:00:01 (0.0984 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0200 (0.0212) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39866] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0263 (0.0263) time: 0.3080 data: 0.2177 max mem: 5716\n","Epoch: [39866] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0232 (0.0233) time: 0.0962 data: 0.0183 max mem: 5716\n","Epoch: [39866] Total time: 0:00:01 (0.1034 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0232 (0.0233) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39867] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0213 (0.0213) time: 0.2885 data: 0.2034 max mem: 5716\n","Epoch: [39867] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0225 (0.0231) time: 0.0961 data: 0.0171 max mem: 5716\n","Epoch: [39867] Total time: 0:00:01 (0.1001 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0225 (0.0231) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39868] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0197 (0.0197) time: 0.2864 data: 0.1969 max mem: 5716\n","Epoch: 
[39868] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0228 (0.0234) time: 0.0952 data: 0.0166 max mem: 5716\n","Epoch: [39868] Total time: 0:00:01 (0.0993 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0228 (0.0234) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39869] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0170 (0.0170) time: 0.2868 data: 0.1943 max mem: 5716\n","Epoch: [39869] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0238 (0.0243) time: 0.0986 data: 0.0163 max mem: 5716\n","Epoch: [39869] Total time: 0:00:01 (0.1036 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0238 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39870] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0275 (0.0275) time: 0.2971 data: 0.2026 max mem: 5716\n","Epoch: [39870] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0231 (0.0243) time: 0.0955 data: 0.0170 max mem: 5716\n","Epoch: [39870] Total time: 0:00:01 (0.0995 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0231 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39871] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0207 (0.0207) time: 0.2801 data: 0.1909 max mem: 5716\n","Epoch: [39871] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0236 (0.0247) time: 0.0940 data: 0.0161 max mem: 5716\n","Epoch: [39871] Total time: 0:00:01 (0.0980 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0236 (0.0247) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39872] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0250 (0.0250) time: 0.2903 data: 0.1909 max mem: 5716\n","Epoch: [39872] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0217 (0.0208) time: 0.0950 data: 0.0160 max mem: 5716\n","Epoch: [39872] Total time: 0:00:01 (0.0990 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0217 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39873] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0169 (0.0169) time: 0.2861 data: 0.1888 max mem: 5716\n","Epoch: [39873] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0199 (0.0208) time: 0.0941 data: 0.0159 max mem: 5716\n","Epoch: [39873] Total time: 0:00:01 (0.0994 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0199 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39874] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0185 (0.0185) time: 0.2919 data: 0.2028 max mem: 5716\n","Epoch: [39874] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0206 (0.0206) time: 0.0951 data: 0.0170 max mem: 5716\n","Epoch: [39874] Total time: 0:00:01 (0.0992 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0206 (0.0206) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39875] [ 0/12] 
eta: 0:00:03 lr: 0.000000 loss: 0.0203 (0.0203) time: 0.2903 data: 0.2037 max mem: 5716\n","Epoch: [39875] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0226 (0.0235) time: 0.0947 data: 0.0171 max mem: 5716\n","Epoch: [39875] Total time: 0:00:01 (0.0988 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0226 (0.0235) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39876] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0200 (0.0200) time: 0.2938 data: 0.2064 max mem: 5716\n","Epoch: [39876] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0221 (0.0228) time: 0.0951 data: 0.0173 max mem: 5716\n","Epoch: [39876] Total time: 0:00:01 (0.0991 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0221 (0.0228) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39877] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0235 (0.0235) time: 0.2914 data: 0.2027 max mem: 5716\n","Epoch: [39877] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0223 (0.0231) time: 0.0980 data: 0.0170 max mem: 5716\n","Epoch: [39877] Total time: 0:00:01 (0.1021 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0223 (0.0231) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39878] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0250 (0.0250) time: 0.2920 data: 0.1980 max mem: 5716\n","Epoch: [39878] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0239 (0.0241) time: 0.0957 data: 0.0166 max mem: 5716\n","Epoch: [39878] Total time: 0:00:01 (0.0999 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0239 (0.0241) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39879] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0239 (0.0239) time: 0.2899 data: 0.2028 max mem: 5716\n","Epoch: [39879] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0239 (0.0242) time: 0.0954 data: 0.0170 max mem: 5716\n","Epoch: [39879] Total time: 0:00:01 (0.0995 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0239 (0.0242) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39880] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0246 (0.0246) time: 0.2882 data: 0.1988 max mem: 5716\n","Epoch: [39880] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0244 (0.0241) time: 0.0947 data: 0.0167 max mem: 5716\n","Epoch: [39880] Total time: 0:00:01 (0.0989 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0244 (0.0241) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39881] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0226 (0.0226) time: 0.2844 data: 0.1987 max mem: 5716\n","Epoch: [39881] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0206 (0.0207) time: 0.0970 data: 0.0167 max mem: 5716\n","Epoch: [39881] Total time: 0:00:01 (0.1012 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0206 (0.0207) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: 
/home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39882] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0215 (0.0215) time: 0.2916 data: 0.2046 max mem: 5716\n","Epoch: [39882] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0209 (0.0208) time: 0.0948 data: 0.0172 max mem: 5716\n","Epoch: [39882] Total time: 0:00:01 (0.0989 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0209 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39883] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0192 (0.0192) time: 0.2896 data: 0.2004 max mem: 5716\n","Epoch: [39883] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0205 (0.0206) time: 0.0947 data: 0.0168 max mem: 5716\n","Epoch: [39883] Total time: 0:00:01 (0.0988 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0205 (0.0206) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39884] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0253 (0.0253) time: 0.3036 data: 0.2053 max mem: 5716\n","Epoch: [39884] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0238 (0.0233) time: 0.0965 data: 0.0173 max mem: 5716\n","Epoch: [39884] Total time: 0:00:01 (0.1006 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0238 (0.0233) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39885] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0240 (0.0240) time: 0.3003 data: 0.2125 max mem: 5716\n","Epoch: [39885] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0236 (0.0236) time: 0.0969 data: 0.0179 max mem: 5716\n","Epoch: [39885] Total time: 0:00:01 (0.1010 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0236 (0.0236) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39886] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0199 (0.0199) time: 0.2989 data: 0.2111 max mem: 5716\n","Epoch: [39886] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0219 (0.0230) time: 0.0959 data: 0.0178 max mem: 5716\n","Epoch: [39886] Total time: 0:00:01 (0.1000 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0219 (0.0230) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39887] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0229 (0.0229) time: 0.2821 data: 0.1928 max mem: 5716\n","Epoch: [39887] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0252 (0.0245) time: 0.0943 data: 0.0162 max mem: 5716\n","Epoch: [39887] Total time: 0:00:01 (0.0985 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0252 (0.0245) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39888] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0224 (0.0224) time: 0.2961 data: 0.1993 max mem: 5716\n","Epoch: [39888] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0242 (0.0243) time: 0.0952 data: 0.0167 max mem: 5716\n","Epoch: [39888] Total time: 0:00:01 (0.1007 s / it)\n","Averaged stats: lr: 0.000000 loss: 
0.0242 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39889] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0219 (0.0219) time: 0.2851 data: 0.1957 max mem: 5716\n","Epoch: [39889] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0246 (0.0245) time: 0.0949 data: 0.0165 max mem: 5716\n","Epoch: [39889] Total time: 0:00:01 (0.0989 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0246 (0.0245) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39890] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0160 (0.0160) time: 0.2920 data: 0.1973 max mem: 5716\n","Epoch: [39890] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0205 (0.0208) time: 0.0949 data: 0.0166 max mem: 5716\n","Epoch: [39890] Total time: 0:00:01 (0.0990 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0205 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39891] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0159 (0.0159) time: 0.2904 data: 0.2033 max mem: 5716\n","Epoch: [39891] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0208 (0.0208) time: 0.0948 data: 0.0171 max mem: 5716\n","Epoch: [39891] Total time: 0:00:01 (0.0989 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0208 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39892] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0215 (0.0215) time: 0.2812 data: 0.1919 max mem: 5716\n","Epoch: [39892] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0208 (0.0208) time: 0.0939 data: 0.0161 max mem: 5716\n","Epoch: [39892] Total time: 0:00:01 (0.0979 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0208 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39893] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0219 (0.0219) time: 0.2849 data: 0.1980 max mem: 5716\n","Epoch: [39893] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0224 (0.0230) time: 0.0942 data: 0.0167 max mem: 5716\n","Epoch: [39893] Total time: 0:00:01 (0.0983 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0224 (0.0230) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39894] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0202 (0.0202) time: 0.2939 data: 0.2006 max mem: 5716\n","Epoch: [39894] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0225 (0.0233) time: 0.0969 data: 0.0169 max mem: 5716\n","Epoch: [39894] Total time: 0:00:01 (0.1022 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0225 (0.0233) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39895] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0292 (0.0292) time: 0.2840 data: 0.1918 max mem: 5716\n","Epoch: [39895] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0231 (0.0233) time: 0.0956 data: 0.0161 max mem: 
5716\n","Epoch: [39895] Total time: 0:00:01 (0.0997 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0231 (0.0233) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39896] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0261 (0.0261) time: 0.2865 data: 0.1966 max mem: 5716\n","Epoch: [39896] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0251 (0.0244) time: 0.0948 data: 0.0165 max mem: 5716\n","Epoch: [39896] Total time: 0:00:01 (0.0988 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0251 (0.0244) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39897] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0221 (0.0221) time: 0.2846 data: 0.1959 max mem: 5716\n","Epoch: [39897] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0241 (0.0241) time: 0.0946 data: 0.0165 max mem: 5716\n","Epoch: [39897] Total time: 0:00:01 (0.0987 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0241 (0.0241) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39898] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0242 (0.0242) time: 0.2873 data: 0.1970 max mem: 5716\n","Epoch: [39898] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0233 (0.0246) time: 0.0950 data: 0.0166 max mem: 5716\n","Epoch: [39898] Total time: 0:00:01 (0.0991 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0233 (0.0246) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39899] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0159 (0.0159) time: 0.2857 data: 0.1955 max mem: 5716\n","Epoch: [39899] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0203 (0.0208) time: 0.0950 data: 0.0164 max mem: 5716\n","Epoch: [39899] Total time: 0:00:01 (0.0990 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0203 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39900] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0206 (0.0206) time: 0.2858 data: 0.1963 max mem: 5716\n","Epoch: [39900] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0200 (0.0207) time: 0.0956 data: 0.0165 max mem: 5716\n","Epoch: [39900] Total time: 0:00:01 (0.0997 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0200 (0.0207) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39901] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0212 (0.0212) time: 0.2903 data: 0.2038 max mem: 5716\n","Epoch: [39901] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0207 (0.0211) time: 0.0948 data: 0.0171 max mem: 5716\n","Epoch: [39901] Total time: 0:00:01 (0.0989 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0207 (0.0211) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39902] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0253 (0.0253) time: 0.2925 data: 0.2059 max mem: 5716\n","Epoch: 
[39902] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0228 (0.0236) time: 0.0950 data: 0.0173 max mem: 5716\n","Epoch: [39902] Total time: 0:00:01 (0.0992 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0228 (0.0236) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39903] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0228 (0.0228) time: 0.2855 data: 0.1939 max mem: 5716\n","Epoch: [39903] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0238 (0.0234) time: 0.0952 data: 0.0163 max mem: 5716\n","Epoch: [39903] Total time: 0:00:01 (0.1008 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0238 (0.0234) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39904] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0245 (0.0245) time: 0.3069 data: 0.2161 max mem: 5716\n","Epoch: [39904] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0215 (0.0227) time: 0.1001 data: 0.0182 max mem: 5716\n","Epoch: [39904] Total time: 0:00:01 (0.1042 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0215 (0.0227) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39905] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0275 (0.0275) time: 0.2878 data: 0.1976 max mem: 5716\n","Epoch: [39905] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0232 (0.0238) time: 0.0958 data: 0.0166 max mem: 5716\n","Epoch: [39905] Total time: 0:00:01 (0.0999 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0232 (0.0238) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39906] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0192 (0.0192) time: 0.2968 data: 0.2001 max mem: 5716\n","Epoch: [39906] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0238 (0.0243) time: 0.0955 data: 0.0168 max mem: 5716\n","Epoch: [39906] Total time: 0:00:01 (0.0996 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0238 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39907] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0199 (0.0199) time: 0.2914 data: 0.2055 max mem: 5716\n","Epoch: [39907] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0248 (0.0246) time: 0.0948 data: 0.0173 max mem: 5716\n","Epoch: [39907] Total time: 0:00:01 (0.0990 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0248 (0.0246) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39908] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0173 (0.0173) time: 0.2855 data: 0.1958 max mem: 5716\n","Epoch: [39908] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0207 (0.0208) time: 0.0956 data: 0.0165 max mem: 5716\n","Epoch: [39908] Total time: 0:00:01 (0.0997 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0207 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39909] [ 0/12] 
eta: 0:00:03 lr: 0.000000 loss: 0.0191 (0.0191) time: 0.2947 data: 0.2070 max mem: 5716\n","Epoch: [39909] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0193 (0.0207) time: 0.0954 data: 0.0174 max mem: 5716\n","Epoch: [39909] Total time: 0:00:01 (0.0994 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0193 (0.0207) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39910] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0285 (0.0285) time: 0.2918 data: 0.2030 max mem: 5716\n","Epoch: [39910] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0202 (0.0208) time: 0.0947 data: 0.0170 max mem: 5716\n","Epoch: [39910] Total time: 0:00:01 (0.0988 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0202 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39911] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0208 (0.0208) time: 0.2896 data: 0.1965 max mem: 5716\n","Epoch: [39911] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0224 (0.0232) time: 0.0985 data: 0.0165 max mem: 5716\n","Epoch: [39911] Total time: 0:00:01 (0.1027 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0224 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39912] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0301 (0.0301) time: 0.2783 data: 0.1896 max mem: 5716\n","Epoch: [39912] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0223 (0.0232) time: 0.0943 data: 0.0160 max mem: 5716\n","Epoch: [39912] Total time: 0:00:01 (0.0985 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0223 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39913] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0210 (0.0210) time: 0.2927 data: 0.1945 max mem: 5716\n","Epoch: [39913] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0228 (0.0232) time: 0.0949 data: 0.0164 max mem: 5716\n","Epoch: [39913] Total time: 0:00:01 (0.0991 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0228 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39914] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0237 (0.0237) time: 0.2910 data: 0.1938 max mem: 5716\n","Epoch: [39914] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0239 (0.0243) time: 0.0954 data: 0.0163 max mem: 5716\n","Epoch: [39914] Total time: 0:00:01 (0.0995 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0239 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39915] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0265 (0.0265) time: 0.2838 data: 0.1921 max mem: 5716\n","Epoch: [39915] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0240 (0.0241) time: 0.0950 data: 0.0162 max mem: 5716\n","Epoch: [39915] Total time: 0:00:01 (0.0990 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0240 (0.0241) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: 
/home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39916] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0249 (0.0249) time: 0.2802 data: 0.1892 max mem: 5716\n","Epoch: [39916] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0242 (0.0247) time: 0.0941 data: 0.0159 max mem: 5716\n","Epoch: [39916] Total time: 0:00:01 (0.0982 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0242 (0.0247) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39917] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0240 (0.0240) time: 0.2819 data: 0.1920 max mem: 5716\n","Epoch: [39917] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0202 (0.0206) time: 0.0943 data: 0.0161 max mem: 5716\n","Epoch: [39917] Total time: 0:00:01 (0.0983 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0202 (0.0206) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39918] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0181 (0.0181) time: 0.2849 data: 0.1963 max mem: 5716\n","Epoch: [39918] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0205 (0.0207) time: 0.0947 data: 0.0165 max mem: 5716\n","Epoch: [39918] Total time: 0:00:01 (0.0988 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0205 (0.0207) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39919] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0199 (0.0199) time: 0.2943 data: 0.2062 max mem: 5716\n","Epoch: [39919] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0196 (0.0209) time: 0.0958 data: 0.0174 max mem: 5716\n","Epoch: [39919] Total time: 0:00:01 (0.0999 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0196 (0.0209) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39920] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0219 (0.0219) time: 0.2956 data: 0.2021 max mem: 5716\n","Epoch: [39920] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0220 (0.0229) time: 0.0960 data: 0.0170 max mem: 5716\n","Epoch: [39920] Total time: 0:00:01 (0.1002 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0220 (0.0229) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39921] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0233 (0.0233) time: 0.2869 data: 0.1975 max mem: 5716\n","Epoch: [39921] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0233 (0.0237) time: 0.0961 data: 0.0166 max mem: 5716\n","Epoch: [39921] Total time: 0:00:01 (0.1002 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0233 (0.0237) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39922] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0205 (0.0205) time: 0.2975 data: 0.2094 max mem: 5716\n","Epoch: [39922] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0226 (0.0232) time: 0.0956 data: 0.0176 max mem: 5716\n","Epoch: [39922] Total time: 0:00:01 (0.0998 s / it)\n","Averaged stats: lr: 0.000000 loss: 
0.0226 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39923] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0310 (0.0310) time: 0.2917 data: 0.2004 max mem: 5716\n","Epoch: [39923] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0240 (0.0245) time: 0.0959 data: 0.0169 max mem: 5716\n","Epoch: [39923] Total time: 0:00:01 (0.1000 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0240 (0.0245) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39924] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0221 (0.0221) time: 0.2885 data: 0.1977 max mem: 5716\n","Epoch: [39924] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0245 (0.0245) time: 0.0951 data: 0.0166 max mem: 5716\n","Epoch: [39924] Total time: 0:00:01 (0.0992 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0245 (0.0245) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39925] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0200 (0.0200) time: 0.2845 data: 0.1944 max mem: 5716\n","Epoch: [39925] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0229 (0.0242) time: 0.0948 data: 0.0164 max mem: 5716\n","Epoch: [39925] Total time: 0:00:01 (0.0989 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0229 (0.0242) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39926] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0186 (0.0186) time: 0.2945 data: 0.2040 max mem: 5716\n","Epoch: [39926] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0201 (0.0208) time: 0.0959 data: 0.0172 max mem: 5716\n","Epoch: [39926] Total time: 0:00:01 (0.1000 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0201 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39927] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0281 (0.0281) time: 0.2994 data: 0.2107 max mem: 5716\n","Epoch: [39927] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0200 (0.0208) time: 0.0958 data: 0.0177 max mem: 5716\n","Epoch: [39927] Total time: 0:00:01 (0.0999 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0200 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39928] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0241 (0.0241) time: 0.3006 data: 0.2132 max mem: 5716\n","Epoch: [39928] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0192 (0.0207) time: 0.0962 data: 0.0179 max mem: 5716\n","Epoch: [39928] Total time: 0:00:01 (0.1004 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0192 (0.0207) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39929] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0233 (0.0233) time: 0.2871 data: 0.1972 max mem: 5716\n","Epoch: [39929] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0230 (0.0232) time: 0.0949 data: 0.0166 max mem: 
5716\n","Epoch: [39929] Total time: 0:00:01 (0.0990 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0230 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39930] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0245 (0.0245) time: 0.2902 data: 0.2019 max mem: 5716\n","Epoch: [39930] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0235 (0.0232) time: 0.0963 data: 0.0170 max mem: 5716\n","Epoch: [39930] Total time: 0:00:01 (0.1015 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0235 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39931] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0245 (0.0245) time: 0.2850 data: 0.1957 max mem: 5716\n","Epoch: [39931] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0228 (0.0229) time: 0.0946 data: 0.0165 max mem: 5716\n","Epoch: [39931] Total time: 0:00:01 (0.1000 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0228 (0.0229) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39932] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0278 (0.0278) time: 0.2972 data: 0.2096 max mem: 5716\n","Epoch: [39932] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0243 (0.0242) time: 0.0956 data: 0.0176 max mem: 5716\n","Epoch: [39932] Total time: 0:00:01 (0.0997 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0243 (0.0242) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39933] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0229 (0.0229) time: 0.2820 data: 0.1927 max mem: 5716\n","Epoch: [39933] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0240 (0.0244) time: 0.0942 data: 0.0162 max mem: 5716\n","Epoch: [39933] Total time: 0:00:01 (0.0983 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0240 (0.0244) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39934] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0244 (0.0244) time: 0.2879 data: 0.1984 max mem: 5716\n","Epoch: [39934] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0227 (0.0237) time: 0.0947 data: 0.0167 max mem: 5716\n","Epoch: [39934] Total time: 0:00:01 (0.0988 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0227 (0.0237) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39935] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0225 (0.0225) time: 0.2936 data: 0.2049 max mem: 5716\n","Epoch: [39935] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0207 (0.0209) time: 0.0957 data: 0.0172 max mem: 5716\n","Epoch: [39935] Total time: 0:00:01 (0.0997 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0207 (0.0209) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39936] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0219 (0.0219) time: 0.2897 data: 0.1987 max mem: 5716\n","Epoch: 
[39936] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0202 (0.0208) time: 0.0956 data: 0.0167 max mem: 5716\n","Epoch: [39936] Total time: 0:00:01 (0.0996 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0202 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39937] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0231 (0.0231) time: 0.2914 data: 0.2031 max mem: 5716\n","Epoch: [39937] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0208 (0.0208) time: 0.0954 data: 0.0171 max mem: 5716\n","Epoch: [39937] Total time: 0:00:01 (0.0996 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0208 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39938] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0229 (0.0229) time: 0.2989 data: 0.1956 max mem: 5716\n","Epoch: [39938] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0229 (0.0232) time: 0.0959 data: 0.0165 max mem: 5716\n","Epoch: [39938] Total time: 0:00:01 (0.1000 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0229 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39939] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0256 (0.0256) time: 0.2887 data: 0.1949 max mem: 5716\n","Epoch: [39939] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0226 (0.0232) time: 0.0978 data: 0.0164 max mem: 5716\n","Epoch: [39939] Total time: 0:00:01 (0.1020 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0226 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39940] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0284 (0.0284) time: 0.2880 data: 0.1989 max mem: 5716\n","Epoch: [39940] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0219 (0.0232) time: 0.0946 data: 0.0167 max mem: 5716\n","Epoch: [39940] Total time: 0:00:01 (0.0988 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0219 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39941] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0174 (0.0174) time: 0.2859 data: 0.1969 max mem: 5716\n","Epoch: [39941] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0237 (0.0240) time: 0.0951 data: 0.0166 max mem: 5716\n","Epoch: [39941] Total time: 0:00:01 (0.0993 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0237 (0.0240) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39942] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0194 (0.0194) time: 0.2897 data: 0.2033 max mem: 5716\n","Epoch: [39942] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0229 (0.0241) time: 0.0957 data: 0.0171 max mem: 5716\n","Epoch: [39942] Total time: 0:00:01 (0.0998 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0229 (0.0241) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39943] [ 0/12] eta: 
0:00:03 lr: 0.000000 loss: 0.0289 (0.0289) time: 0.2835 data: 0.1940 max mem: 5716\n","Epoch: [39943] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0248 (0.0244) time: 0.0974 data: 0.0163 max mem: 5716\n","Epoch: [39943] Total time: 0:00:01 (0.1016 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0248 (0.0244) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39944] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0221 (0.0221) time: 0.2894 data: 0.2006 max mem: 5716\n","Epoch: [39944] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0208 (0.0206) time: 0.0951 data: 0.0169 max mem: 5716\n","Epoch: [39944] Total time: 0:00:01 (0.0993 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0208 (0.0206) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39945] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0158 (0.0158) time: 0.2924 data: 0.1985 max mem: 5716\n","Epoch: [39945] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0209 (0.0206) time: 0.0958 data: 0.0167 max mem: 5716\n","Epoch: [39945] Total time: 0:00:01 (0.1000 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0209 (0.0206) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39946] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0274 (0.0274) time: 0.2913 data: 0.2048 max mem: 5716\n","Epoch: [39946] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0197 (0.0207) time: 0.0955 data: 0.0172 max mem: 5716\n","Epoch: [39946] Total time: 0:00:01 (0.0996 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0197 (0.0207) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39947] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0233 (0.0233) time: 0.2927 data: 0.2011 max mem: 5716\n","Epoch: [39947] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0233 (0.0233) time: 0.0969 data: 0.0169 max mem: 5716\n","Epoch: [39947] Total time: 0:00:01 (0.1011 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0233 (0.0233) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39948] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0241 (0.0241) time: 0.2860 data: 0.1948 max mem: 5716\n","Epoch: [39948] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0232 (0.0231) time: 0.0943 data: 0.0164 max mem: 5716\n","Epoch: [39948] Total time: 0:00:01 (0.0984 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0232 (0.0231) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39949] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0228 (0.0228) time: 0.2854 data: 0.1949 max mem: 5716\n","Epoch: [39949] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0225 (0.0232) time: 0.0944 data: 0.0164 max mem: 5716\n","Epoch: [39949] Total time: 0:00:01 (0.0985 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0225 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: 
/home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39950] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0236 (0.0236) time: 0.2852 data: 0.1960 max mem: 5716\n","Epoch: [39950] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0246 (0.0244) time: 0.0948 data: 0.0165 max mem: 5716\n","Epoch: [39950] Total time: 0:00:01 (0.0999 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0246 (0.0244) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39951] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0216 (0.0216) time: 0.2973 data: 0.1968 max mem: 5716\n","Epoch: [39951] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0241 (0.0246) time: 0.0957 data: 0.0166 max mem: 5716\n","Epoch: [39951] Total time: 0:00:01 (0.0998 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0241 (0.0246) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39952] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0257 (0.0257) time: 0.2933 data: 0.2065 max mem: 5716\n","Epoch: [39952] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0242 (0.0243) time: 0.0950 data: 0.0173 max mem: 5716\n","Epoch: [39952] Total time: 0:00:01 (0.0992 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0242 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39953] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0151 (0.0151) time: 0.2861 data: 0.1968 max mem: 5716\n","Epoch: [39953] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0201 (0.0207) time: 0.0950 data: 0.0165 max mem: 5716\n","Epoch: [39953] Total time: 0:00:01 (0.0991 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0201 (0.0207) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39954] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0177 (0.0177) time: 0.2907 data: 0.1996 max mem: 5716\n","Epoch: [39954] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0200 (0.0210) time: 0.0952 data: 0.0168 max mem: 5716\n","Epoch: [39954] Total time: 0:00:01 (0.0993 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0200 (0.0210) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39955] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0174 (0.0174) time: 0.2848 data: 0.1953 max mem: 5716\n","Epoch: [39955] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0198 (0.0204) time: 0.0944 data: 0.0164 max mem: 5716\n","Epoch: [39955] Total time: 0:00:01 (0.0985 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0198 (0.0204) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39956] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0263 (0.0263) time: 0.2876 data: 0.2002 max mem: 5716\n","Epoch: [39956] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0221 (0.0232) time: 0.0946 data: 0.0168 max mem: 5716\n","Epoch: [39956] Total time: 0:00:01 (0.0987 s / it)\n","Averaged stats: lr: 0.000000 loss: 
0.0221 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39957] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0261 (0.0261) time: 0.2889 data: 0.1984 max mem: 5716\n","Epoch: [39957] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0230 (0.0237) time: 0.0955 data: 0.0167 max mem: 5716\n","Epoch: [39957] Total time: 0:00:01 (0.0997 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0230 (0.0237) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39958] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0267 (0.0267) time: 0.2839 data: 0.1946 max mem: 5716\n","Epoch: [39958] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0226 (0.0230) time: 0.0947 data: 0.0164 max mem: 5716\n","Epoch: [39958] Total time: 0:00:01 (0.0989 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0226 (0.0230) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39959] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0224 (0.0224) time: 0.2860 data: 0.1967 max mem: 5716\n","Epoch: [39959] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0224 (0.0243) time: 0.0945 data: 0.0165 max mem: 5716\n","Epoch: [39959] Total time: 0:00:01 (0.0986 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0224 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39960] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0271 (0.0271) time: 0.2851 data: 0.1935 max mem: 5716\n","Epoch: [39960] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0237 (0.0240) time: 0.0948 data: 0.0163 max mem: 5716\n","Epoch: [39960] Total time: 0:00:01 (0.0990 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0237 (0.0240) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39961] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0268 (0.0268) time: 0.2989 data: 0.2105 max mem: 5716\n","Epoch: [39961] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0239 (0.0245) time: 0.0967 data: 0.0177 max mem: 5716\n","Epoch: [39961] Total time: 0:00:01 (0.1022 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0239 (0.0245) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39962] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0193 (0.0193) time: 0.2939 data: 0.2054 max mem: 5716\n","Epoch: [39962] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0195 (0.0206) time: 0.0958 data: 0.0173 max mem: 5716\n","Epoch: [39962] Total time: 0:00:01 (0.1000 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0195 (0.0206) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39963] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0179 (0.0179) time: 0.2927 data: 0.2042 max mem: 5716\n","Epoch: [39963] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0186 (0.0209) time: 0.0953 data: 0.0172 max mem: 
5716\n","Epoch: [39963] Total time: 0:00:01 (0.0994 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0186 (0.0209) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39964] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0207 (0.0207) time: 0.3069 data: 0.2036 max mem: 5716\n","Epoch: [39964] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0204 (0.0209) time: 0.0969 data: 0.0171 max mem: 5716\n","Epoch: [39964] Total time: 0:00:01 (0.1011 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0204 (0.0209) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39965] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0222 (0.0222) time: 0.2856 data: 0.1969 max mem: 5716\n","Epoch: [39965] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0225 (0.0228) time: 0.0941 data: 0.0165 max mem: 5716\n","Epoch: [39965] Total time: 0:00:01 (0.0995 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0225 (0.0228) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39966] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0210 (0.0210) time: 0.2983 data: 0.1982 max mem: 5716\n","Epoch: [39966] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0225 (0.0232) time: 0.0965 data: 0.0166 max mem: 5716\n","Epoch: [39966] Total time: 0:00:01 (0.1008 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0225 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39967] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0243 (0.0243) time: 0.2984 data: 0.2049 max mem: 5716\n","Epoch: [39967] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0226 (0.0233) time: 0.0952 data: 0.0172 max mem: 5716\n","Epoch: [39967] Total time: 0:00:01 (0.0993 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0226 (0.0233) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39968] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0262 (0.0262) time: 0.2845 data: 0.1961 max mem: 5716\n","Epoch: [39968] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0238 (0.0245) time: 0.0941 data: 0.0165 max mem: 5716\n","Epoch: [39968] Total time: 0:00:01 (0.0984 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0238 (0.0245) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39969] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0209 (0.0209) time: 0.2846 data: 0.1950 max mem: 5716\n","Epoch: [39969] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0240 (0.0244) time: 0.0943 data: 0.0164 max mem: 5716\n","Epoch: [39969] Total time: 0:00:01 (0.0985 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0240 (0.0244) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39970] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0211 (0.0211) time: 0.2852 data: 0.1950 max mem: 5716\n","Epoch: 
[39970] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0233 (0.0244) time: 0.0950 data: 0.0164 max mem: 5716\n","Epoch: [39970] Total time: 0:00:01 (0.0991 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0233 (0.0244) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39971] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0208 (0.0208) time: 0.3030 data: 0.2058 max mem: 5716\n","Epoch: [39971] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0198 (0.0207) time: 0.0977 data: 0.0173 max mem: 5716\n","Epoch: [39971] Total time: 0:00:01 (0.1019 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0198 (0.0207) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39972] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0205 (0.0205) time: 0.2925 data: 0.1988 max mem: 5716\n","Epoch: [39972] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0205 (0.0207) time: 0.0981 data: 0.0167 max mem: 5716\n","Epoch: [39972] Total time: 0:00:01 (0.1023 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0205 (0.0207) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39973] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0167 (0.0167) time: 0.2938 data: 0.2049 max mem: 5716\n","Epoch: [39973] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0207 (0.0207) time: 0.0987 data: 0.0172 max mem: 5716\n","Epoch: [39973] Total time: 0:00:01 (0.1028 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0207 (0.0207) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39974] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0226 (0.0226) time: 0.2919 data: 0.2046 max mem: 5716\n","Epoch: [39974] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0232 (0.0232) time: 0.0948 data: 0.0172 max mem: 5716\n","Epoch: [39974] Total time: 0:00:01 (0.0990 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0232 (0.0232) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39975] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0218 (0.0218) time: 0.2844 data: 0.1959 max mem: 5716\n","Epoch: [39975] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0224 (0.0235) time: 0.0943 data: 0.0165 max mem: 5716\n","Epoch: [39975] Total time: 0:00:01 (0.0985 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0224 (0.0235) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39976] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0216 (0.0216) time: 0.2888 data: 0.2026 max mem: 5716\n","Epoch: [39976] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0220 (0.0233) time: 0.0945 data: 0.0170 max mem: 5716\n","Epoch: [39976] Total time: 0:00:01 (0.0986 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0220 (0.0233) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39977] [ 0/12] eta: 
0:00:03 lr: 0.000000 loss: 0.0247 (0.0247) time: 0.2842 data: 0.1956 max mem: 5716\n","Epoch: [39977] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0244 (0.0242) time: 0.0944 data: 0.0164 max mem: 5716\n","Epoch: [39977] Total time: 0:00:01 (0.0985 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0244 (0.0242) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39978] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0191 (0.0191) time: 0.2922 data: 0.2048 max mem: 5716\n","Epoch: [39978] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0246 (0.0243) time: 0.0952 data: 0.0172 max mem: 5716\n","Epoch: [39978] Total time: 0:00:01 (0.0993 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0246 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39979] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0286 (0.0286) time: 0.2807 data: 0.1871 max mem: 5716\n","Epoch: [39979] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0236 (0.0245) time: 0.0946 data: 0.0158 max mem: 5716\n","Epoch: [39979] Total time: 0:00:01 (0.0987 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0236 (0.0245) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39980] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0223 (0.0223) time: 0.2803 data: 0.1911 max mem: 5716\n","Epoch: [39980] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0188 (0.0205) time: 0.0941 data: 0.0161 max mem: 5716\n","Epoch: [39980] Total time: 0:00:01 (0.0982 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0188 (0.0205) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39981] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0223 (0.0223) time: 0.2977 data: 0.2065 max mem: 5716\n","Epoch: [39981] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0204 (0.0208) time: 0.0962 data: 0.0174 max mem: 5716\n","Epoch: [39981] Total time: 0:00:01 (0.1004 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0204 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39982] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0197 (0.0197) time: 0.2786 data: 0.1891 max mem: 5716\n","Epoch: [39982] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0204 (0.0208) time: 0.0941 data: 0.0159 max mem: 5716\n","Epoch: [39982] Total time: 0:00:01 (0.0982 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0204 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39983] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0235 (0.0235) time: 0.2895 data: 0.2017 max mem: 5716\n","Epoch: [39983] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0229 (0.0230) time: 0.0953 data: 0.0170 max mem: 5716\n","Epoch: [39983] Total time: 0:00:01 (0.0994 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0229 (0.0230) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: 
/home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39984] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0242 (0.0242) time: 0.2954 data: 0.2106 max mem: 5716\n","Epoch: [39984] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0234 (0.0231) time: 0.0955 data: 0.0177 max mem: 5716\n","Epoch: [39984] Total time: 0:00:01 (0.0995 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0234 (0.0231) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39985] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0250 (0.0250) time: 0.2868 data: 0.2009 max mem: 5716\n","Epoch: [39985] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0231 (0.0234) time: 0.0946 data: 0.0169 max mem: 5716\n","Epoch: [39985] Total time: 0:00:01 (0.0987 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0231 (0.0234) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39986] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0230 (0.0230) time: 0.2831 data: 0.1945 max mem: 5716\n","Epoch: [39986] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0230 (0.0244) time: 0.0940 data: 0.0164 max mem: 5716\n","Epoch: [39986] Total time: 0:00:01 (0.0982 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0230 (0.0244) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39987] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0302 (0.0302) time: 0.2867 data: 0.1967 max mem: 5716\n","Epoch: [39987] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0234 (0.0243) time: 0.0948 data: 0.0165 max mem: 5716\n","Epoch: [39987] Total time: 0:00:01 (0.0991 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0234 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39988] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0286 (0.0286) time: 0.2916 data: 0.2012 max mem: 5716\n","Epoch: [39988] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0238 (0.0243) time: 0.0952 data: 0.0169 max mem: 5716\n","Epoch: [39988] Total time: 0:00:01 (0.0994 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0238 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39989] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0231 (0.0231) time: 0.2851 data: 0.1950 max mem: 5716\n","Epoch: [39989] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0200 (0.0207) time: 0.0948 data: 0.0164 max mem: 5716\n","Epoch: [39989] Total time: 0:00:01 (0.0990 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0200 (0.0207) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39990] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0203 (0.0203) time: 0.3000 data: 0.2131 max mem: 5716\n","Epoch: [39990] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0204 (0.0208) time: 0.0961 data: 0.0179 max mem: 5716\n","Epoch: [39990] Total time: 0:00:01 (0.1002 s / it)\n","Averaged stats: lr: 0.000000 loss: 
0.0204 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39991] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0246 (0.0246) time: 0.2931 data: 0.2050 max mem: 5716\n","Epoch: [39991] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0210 (0.0208) time: 0.0978 data: 0.0172 max mem: 5716\n","Epoch: [39991] Total time: 0:00:01 (0.1020 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0210 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39992] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0255 (0.0255) time: 0.2835 data: 0.1954 max mem: 5716\n","Epoch: [39992] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0237 (0.0231) time: 0.0939 data: 0.0164 max mem: 5716\n","Epoch: [39992] Total time: 0:00:01 (0.0981 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0237 (0.0231) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39993] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0243 (0.0243) time: 0.2853 data: 0.1943 max mem: 5716\n","Epoch: [39993] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0235 (0.0234) time: 0.0943 data: 0.0164 max mem: 5716\n","Epoch: [39993] Total time: 0:00:01 (0.0984 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0235 (0.0234) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39994] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0220 (0.0220) time: 0.2834 data: 0.1945 max mem: 5716\n","Epoch: [39994] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0220 (0.0226) time: 0.0939 data: 0.0164 max mem: 5716\n","Epoch: [39994] Total time: 0:00:01 (0.0980 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0220 (0.0226) fix_position_ratio: 0.25 puzzle_patch_size: 32\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39995] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0207 (0.0207) time: 0.2875 data: 0.1968 max mem: 5716\n","Epoch: [39995] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0230 (0.0243) time: 0.0964 data: 0.0165 max mem: 5716\n","Epoch: [39995] Total time: 0:00:01 (0.1005 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0230 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39996] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0250 (0.0250) time: 0.2857 data: 0.1941 max mem: 5716\n","Epoch: [39996] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0249 (0.0242) time: 0.0955 data: 0.0163 max mem: 5716\n","Epoch: [39996] Total time: 0:00:01 (0.1005 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0249 (0.0242) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39997] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0232 (0.0232) time: 0.2845 data: 0.1943 max mem: 5716\n","Epoch: [39997] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0241 (0.0243) time: 0.0958 data: 0.0163 max mem: 
5716\n","Epoch: [39997] Total time: 0:00:01 (0.0999 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0241 (0.0243) fix_position_ratio: 0.25 puzzle_patch_size: 112\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39998] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0228 (0.0228) time: 0.2939 data: 0.1968 max mem: 5716\n","Epoch: [39998] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0213 (0.0208) time: 0.0974 data: 0.0165 max mem: 5716\n","Epoch: [39998] Total time: 0:00:01 (0.1034 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0213 (0.0208) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","log_dir: /home/Pathology_Experiment/imaging_results/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20\n","Epoch: [39999] [ 0/12] eta: 0:00:03 lr: 0.000000 loss: 0.0227 (0.0227) time: 0.3011 data: 0.2095 max mem: 5716\n","Epoch: [39999] [11/12] eta: 0:00:00 lr: 0.000000 loss: 0.0222 (0.0211) time: 0.0963 data: 0.0176 max mem: 5716\n","Epoch: [39999] Total time: 0:00:01 (0.1004 s / it)\n","Averaged stats: lr: 0.000000 loss: 0.0222 (0.0211) fix_position_ratio: 0.25 puzzle_patch_size: 16\n","Figure(640x480)\n","Training time 17:43:58\n"]}],"source":["!python PuzzleTuning.py --model sae_vit_base_patch16 --PromptTuning Deep --batch_size 32 --group_shuffle_size 8 --strategy loop --blr 1.5e-5 --epochs 40000 --warmup_epochs 20 --accum_iter 2 --print_freq 200 --check_point_gap 10000 --input_size 224 --pin_mem --num_workers 2 --basic_state_dict timm --data_path /data/Pathology_Experiment/dataset/PuzzleTuning_demoset --output_dir /home/Pathology_Experiment/runs --log_dir /home/Pathology_Experiment/imaging_results"]},{"cell_type":"markdown","metadata":{"id":"toMbJKBKa7Sw"},"source":["Visulization"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"background_save":true},"id":"v4jPIn7DbAaX"},"outputs":[{"name":"stdout","output_type":"stream","text":["\n","\n","Testing_PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20_b_8_hint_ratio_0.5_patch_size_16\n","\n","\n","Use 1 GPUs of idx: 0\n","job dir: /home/Pathology_Experiment/code\n","Namespace(model_idx='Testing_PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20_b_8_hint_ratio_0.5_patch_size_16',\n","batch_size=8,\n","model='sae_vit_base_patch16',\n","seg_decoder=None,\n","input_size=224,\n","num_classes=3,\n","mask_ratio=None,\n","fix_position_ratio=0.5,\n","fix_patch_size=16,\n","group_shuffle_size=-1,\n","shuffle_dataloader=False,\n","PromptTuning='Deep',\n","Prompt_Token_num=20,\n","norm_pix_loss=False,\n","data_path='/data/Pathology_Experiment/dataset/PuzzleTuning_demoset',\n","output_dir='/home/Pathology_Experiment/imaging_results',\n","log_dir='/home/Pathology_Experiment/imaging_results',\n","gpu_idx=0,\n","device='cuda',\n","seed=42,\n","checkpoint_path='/home/Pathology_Experiment/runs/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20_checkpoint-39999.pth',\n","combined_pred_illustration=False,\n","enable_visualize_check=True,\n","check_minibatch=None,\n","check_samples=1,\n","num_workers=10,\n","pin_mem=True,\n","gpu=1)\n","dataset_test Dataset ImageFolder\n"," Number of datapoints: 400\n"," Root location: /data/Pathology_Experiment/dataset/PuzzleTuning_demoset\n"," StandardTransform\n","Transform: Compose(\n"," Resize(size=224, interpolation=bilinear, max_size=None, antialias=warn)\n"," ToTensor()\n"," )\n","Testing output files will be at 
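{"cell_type":"markdown","metadata":{},"source":["The averaged stats above report a fixed `fix_position_ratio` and a cycling `puzzle_patch_size`. As a quick, hypothetical sanity check (ours, not repo code), the sketch below relates those settings to tile counts on the 224-pixel input, assuming `fix_position_ratio` is the fraction of puzzle tiles kept in place and `puzzle_patch_size` is the tile side length."]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["# Hypothetical sanity check (ours, not repo code): relate the logged\n","# fix_position_ratio / puzzle_patch_size values to tile counts on a 224 input.\n","input_size = 224\n","fix_position_ratio = 0.25  # from the averaged stats above\n","for puzzle_patch_size in (16, 32, 112):  # sizes the loop strategy cycles through\n","    n_tiles = (input_size // puzzle_patch_size) ** 2\n","    n_fixed = round(n_tiles * fix_position_ratio)\n","    print(puzzle_patch_size, n_tiles, n_fixed, n_tiles - n_fixed)"]},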
{"cell_type":"markdown","metadata":{"id":"toMbJKBKa7Sw"},"source":["Visualization"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"background_save":true},"id":"v4jPIn7DbAaX"},"outputs":[{"name":"stdout","output_type":"stream","text":["\n","\n","Testing_PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20_b_8_hint_ratio_0.5_patch_size_16\n","\n","\n","Use 1 GPUs of idx: 0\n","job dir: /home/Pathology_Experiment/code\n","Namespace(model_idx='Testing_PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20_b_8_hint_ratio_0.5_patch_size_16',\n","batch_size=8,\n","model='sae_vit_base_patch16',\n","seg_decoder=None,\n","input_size=224,\n","num_classes=3,\n","mask_ratio=None,\n","fix_position_ratio=0.5,\n","fix_patch_size=16,\n","group_shuffle_size=-1,\n","shuffle_dataloader=False,\n","PromptTuning='Deep',\n","Prompt_Token_num=20,\n","norm_pix_loss=False,\n","data_path='/data/Pathology_Experiment/dataset/PuzzleTuning_demoset',\n","output_dir='/home/Pathology_Experiment/imaging_results',\n","log_dir='/home/Pathology_Experiment/imaging_results',\n","gpu_idx=0,\n","device='cuda',\n","seed=42,\n","checkpoint_path='/home/Pathology_Experiment/runs/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20_checkpoint-39999.pth',\n","combined_pred_illustration=False,\n","enable_visualize_check=True,\n","check_minibatch=None,\n","check_samples=1,\n","num_workers=10,\n","pin_mem=True,\n","gpu=1)\n","dataset_test Dataset ImageFolder\n"," Number of datapoints: 400\n"," Root location: /data/Pathology_Experiment/dataset/PuzzleTuning_demoset\n"," StandardTransform\n","Transform: Compose(\n"," Resize(size=224, interpolation=bilinear, max_size=None, antialias=warn)\n"," ToTensor()\n"," )\n","Testing output files will be at /home/Pathology_Experiment/imaging_results/Testing_PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20_b_8_hint_ratio_0.5_patch_size_16\n","Decoder: None\n","Start testing for Testing_PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20_b_8_hint_ratio_0.5_patch_size_16 \n"," with checkpoint: /home/Pathology_Experiment/runs/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20_checkpoint-39999.pth\n","Test index 1 of 1 minibatch with batch_size of 8 time used: 2.576171636581421\n","minibatch AVG loss: 0.026270559057593346\n","Figure(640x480)\n","[Test indices 2-49 elided: per-minibatch AVG losses ranged from about 0.016 to 0.065]\n","Test index 50 of 1 minibatch with batch_size of 8 time used: 0.34302759170532227\n","minibatch AVG loss: 0.056246597319841385\n","Figure(640x480)\n","\n","Test_dataset_size: 400 \n","Avg Loss: 0.0380\n","Testing time 0:00:21\n"]}],"source":["!python PuzzleTesting.py --model sae_vit_base_patch16 --PromptTuning Deep --Prompt_Token_num 20 --batch_size 8 --fix_position_ratio 0.5 --fix_patch_size 16 --enable_visualize_check --data_path /data/Pathology_Experiment/dataset/PuzzleTuning_demoset --output_dir /home/Pathology_Experiment/imaging_results --log_dir /home/Pathology_Experiment/imaging_results --checkpoint_path /home/Pathology_Experiment/runs/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20_checkpoint-39999.pth"]},
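{"cell_type":"markdown","metadata":{},"source":["Before extracting the prompt tokens, it can help to peek inside the final checkpoint. A small sketch (ours, not repo code; the `model` and `epoch` key names are assumptions inferred from the transfer log below):"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["# Sketch: inspect the PuzzleTuning checkpoint saved above; 'epoch' and\n","# 'model' are assumed key names, inferred from the transfermodel.py log.\n","import torch\n","ckpt = torch.load('/home/Pathology_Experiment/runs/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20_checkpoint-39999.pth', map_location='cpu')\n","print(list(ckpt.keys()))\n","print(ckpt.get('epoch'))  # expect 39999 if the key is present"]},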
{"cell_type":"markdown","metadata":{"id":"SbLcZyoHbBLR"},"source":["Load the ViT prompt weights from the pre-trained checkpoint"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"background_save":true},"id":"cECU8Q9XqAB1"},"outputs":[],"source":["os.chdir(\"/home/Pathology_Experiment/code/utils\")"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"background_save":true},"id":"vhoXesRYbEal"},"outputs":[{"name":"stdout","output_type":"stream","text":["model forward cheacked\n","model is ready now!\n","checkpoint epoch 39999\n","DataParallel model loaded\n","model : ViT\n","prompt obtained\n","model trained by multi-GPUs has its single GPU copy saved at /home/Pathology_Experiment/saved_models/ViT_b16_224_timm_PuzzleTuning_SAE_CPIAm_Prompt_Deep_tokennum_20_promptstate.pth\n"]}],"source":["!python transfermodel.py --given_name ViT_b16_224_timm_PuzzleTuning_SAE_CPIAm_Prompt_Deep_tokennum_20_promptstate.pth --model_idx ViT --PromptTuning Deep --Prompt_Token_num 20 --edge_size 224 --checkpoint_path /home/Pathology_Experiment/runs/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20_checkpoint-39999.pth --save_model_path /home/Pathology_Experiment/saved_models"]},{"cell_type":"markdown","metadata":{"id":"Ia9qphxi86wH"},"source":["# Finetuning and comparison\n","* set up the paths via the command line\n","* use argparse to set the hyper-parameters"]},{"cell_type":"markdown","metadata":{"id":"akuwL6GlbF8h"},"source":["## Finetuning without PuzzleTuning"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"background_save":true},"id":"yvoJWsQNrzhP"},"outputs":[],"source":["os.chdir(\"/home/Pathology_Experiment/code\")"]},
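{"cell_type":"markdown","metadata":{},"source":["The runs below print per-class precision / recall / sensitivity / specificity / FPR / NPV next to raw TP/TN/FP/FN counts. As a reference, here is a small illustrative sketch (ours, not repo code) of how those percentages derive from the counts, checked against the Epoch 1 train numbers below."]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["# Illustrative helper (not repo code): recompute the per-class metrics the\n","# training logs below report from TP / TN / FP / FN counts (in percent).\n","def cls_metrics(tp, tn, fp, fn):\n","    return {'precision': 100 * tp / (tp + fp),\n","            'recall': 100 * tp / (tp + fn),  # recall == sensitivity\n","            'specificity': 100 * tn / (tn + fp),\n","            'FPR': 100 * fp / (fp + tn),\n","            'NPV': 100 * tn / (tn + fn)}\n","\n","# benign class at Epoch 1 train below: TP 20, TN 29, FP 6, FN 9\n","print(cls_metrics(20, 29, 6, 9))  # precision 76.92, recall 68.97, NPV 76.32"]},{"cell_type":"markdown","metadata":{"id":"5cfIDQpn2L5H"},"source":["### ViT (with timm 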
weight)"]},{"cell_type":"markdown","metadata":{"id":"0pgSCO1EbNSm"},"source":["Train"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"background_save":true},"id":"xTNPSavybFTm"},"outputs":[{"name":"stdout","output_type":"stream","text":["class_names: ['benign', 'malignant']\n","*********************************setting*************************************\n","Namespace(model_idx='ViT_base_timm_401_lf15_finetuning_warwick_CLS', drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, cls_token_off=False, pos_embedding_off=False, att_module='SimAM', backbone_PT_off=False, gpu_idx=-1, dataroot='/data/Pathology_Experiment/dataset/warwick_CLS', model_path='/home/Pathology_Experiment/saved_models', draw_root='/home/Pathology_Experiment/runs/ViT_finetuning_with_timm', paint=True, enable_tensorboard=True, enable_attention_check=False, enable_visualize_check=False, PromptTuning=None, Prompt_Token_num=20, PromptUnFreeze=False, linearprobing=False, Pre_Trained_model_path=None, Prompt_state_path=None, enable_sam=False, augmentation_name=None, ratio_strategy=None, patch_strategy=None, loss_drive_threshold=4.0, fix_position_ratio=0.5, fix_patch_size=None, patch_size_jump=None, num_classes=0, edge_size=224, data_augmentation_mode=3, batch_size=8, num_epochs=50, intake_epochs=0, lr=1e-05, lrf=0.15, opt_name='Adam', check_minibatch=None, num_workers=2)\n","we dont have more GPU idx here, try to use gpu_idx=0\n","['convit_base',\n"," 'convit_small',\n"," 'convit_tiny',\n"," 'crossvit_9_240',\n"," 'crossvit_9_dagger_240',\n"," 'crossvit_15_240',\n"," 'crossvit_15_dagger_240',\n"," 'crossvit_15_dagger_408',\n"," 'crossvit_18_240',\n"," 'crossvit_18_dagger_240',\n"," 'crossvit_18_dagger_408',\n"," 'crossvit_base_240',\n"," 'crossvit_small_240',\n"," 'crossvit_tiny_240',\n"," 'levit_128',\n"," 'levit_128s',\n"," 'levit_192',\n"," 'levit_256',\n"," 'levit_384',\n"," 'vit_base_patch8_224',\n"," 'vit_base_patch8_224_in21k',\n"," 'vit_base_patch16_224',\n"," 'vit_base_patch16_224_in21k',\n"," 'vit_base_patch16_224_miil',\n"," 'vit_base_patch16_224_miil_in21k',\n"," 'vit_base_patch16_384',\n"," 'vit_base_patch16_sam_224',\n"," 'vit_base_patch32_224',\n"," 'vit_base_patch32_224_in21k',\n"," 'vit_base_patch32_384',\n"," 'vit_base_patch32_sam_224',\n"," 'vit_base_r26_s32_224',\n"," 'vit_base_r50_s16_224',\n"," 'vit_base_r50_s16_224_in21k',\n"," 'vit_base_r50_s16_384',\n"," 'vit_base_resnet26d_224',\n"," 'vit_base_resnet50_224_in21k',\n"," 'vit_base_resnet50_384',\n"," 'vit_base_resnet50d_224',\n"," 'vit_giant_patch14_224',\n"," 'vit_gigantic_patch14_224',\n"," 'vit_huge_patch14_224',\n"," 'vit_huge_patch14_224_in21k',\n"," 'vit_large_patch16_224',\n"," 'vit_large_patch16_224_in21k',\n"," 'vit_large_patch16_384',\n"," 'vit_large_patch32_224',\n"," 'vit_large_patch32_224_in21k',\n"," 'vit_large_patch32_384',\n"," 'vit_large_r50_s32_224',\n"," 'vit_large_r50_s32_224_in21k',\n"," 'vit_large_r50_s32_384',\n"," 'vit_small_patch16_224',\n"," 'vit_small_patch16_224_in21k',\n"," 'vit_small_patch16_384',\n"," 'vit_small_patch32_224',\n"," 'vit_small_patch32_224_in21k',\n"," 'vit_small_patch32_384',\n"," 'vit_small_r26_s32_224',\n"," 'vit_small_r26_s32_224_in21k',\n"," 'vit_small_r26_s32_384',\n"," 'vit_small_resnet26d_224',\n"," 'vit_small_resnet50d_s16_224',\n"," 'vit_tiny_patch16_224',\n"," 'vit_tiny_patch16_224_in21k',\n"," 'vit_tiny_patch16_384',\n"," 'vit_tiny_r_s16_p8_224',\n"," 'vit_tiny_r_s16_p8_224_in21k',\n"," 'vit_tiny_r_s16_p8_384']\n","test model output: tensor([[-0.4719, 2.0119]], 
grad_fn=\u003cAddmmBackward0\u003e)\n","model is ready now!\n","building model (no-prompt) with pretrained_backbone status: True\n","timm loaded\n","GPU: 0\n","----------------------------------------------------------------\n"," Layer (type) Output Shape Param #\n","================================================================\n"," Conv2d-1 [-1, 768, 14, 14] 590,592\n"," Identity-2 [-1, 196, 768] 0\n"," PatchEmbed-3 [-1, 196, 768] 0\n"," Dropout-4 [-1, 197, 768] 0\n"," LayerNorm-5 [-1, 197, 768] 1,536\n"," Linear-6 [-1, 197, 2304] 1,771,776\n"," Dropout-7 [-1, 12, 197, 197] 0\n"," Linear-8 [-1, 197, 768] 590,592\n"," Dropout-9 [-1, 197, 768] 0\n"," Attention-10 [-1, 197, 768] 0\n"," Identity-11 [-1, 197, 768] 0\n"," LayerNorm-12 [-1, 197, 768] 1,536\n"," Linear-13 [-1, 197, 3072] 2,362,368\n"," GELU-14 [-1, 197, 3072] 0\n"," Dropout-15 [-1, 197, 3072] 0\n"," Linear-16 [-1, 197, 768] 2,360,064\n"," Dropout-17 [-1, 197, 768] 0\n"," Mlp-18 [-1, 197, 768] 0\n"," Identity-19 [-1, 197, 768] 0\n"," Block-20 [-1, 197, 768] 0\n","[Layers 21-196 elided: eleven further Transformer Blocks (through Block-196), each repeating the Block-20 pattern of LayerNorm, Attention and Mlp with identical output shapes and parameter counts]\n"," LayerNorm-197 [-1, 197, 768] 1,536\n"," Identity-198 [-1, 768] 0\n"," Linear-199 [-1, 2] 1,538\n","================================================================\n","Total params: 85,648,130\n","Trainable params: 85,648,130\n","Non-trainable params: 0\n","----------------------------------------------------------------\n","Input size (MB): 0.57\n","Forward/backward pass size (MB): 408.54\n","Params size (MB): 326.72\n","Estimated Total Size (MB): 735.83\n","----------------------------------------------------------------\n","model : ViT_base_timm_401_lf15_finetuning_warwick_CLS\n","no valid counterparts augmentation selected\n","Epoch 1/50\n","----------\n","\n","Epoch: 1 train \n","Loss: 0.4487 Acc: 71.0145\n","benign precision: 76.9231 recall: 68.9655\n","benign sensitivity: 68.9655 specificity: 82.8571\n","benign FPR: 17.1429 NPV: 76.3158\n","benign TP: 20.0\n","benign TN: 29.0\n","benign FP: 6.0\n","benign FN: 9.0\n","malignant precision: 76.3158 recall: 82.8571\n","malignant sensitivity: 82.8571 specificity: 68.9655\n","malignant FPR: 31.0345 NPV: 76.9231\n","malignant TP: 29.0\n","malignant TN: 20.0\n","malignant FP: 9.0\n","malignant FN: 6.0\n","\n","\n","\n","Epoch: 1 val \n","Loss: 0.0832 Acc: 93.7500\n","benign precision: 87.5000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 88.8889\n","benign FPR: 11.1111 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 8.0\n","benign FP: 1.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 88.8889\n","malignant sensitivity: 88.8889 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 87.5000\n","malignant TP: 8.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 1.0\n","\n","\n","\n","Epoch 2/50\n","----------\n","\n","Epoch: 2 train \n","Loss: 0.0566 Acc: 91.3043\n","benign precision: 96.4286 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 97.2973\n","benign FPR: 2.7027 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 36.0\n","benign FP: 1.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 97.2973\n","malignant sensitivity: 97.2973 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 96.4286\n","malignant TP: 36.0\n","malignant TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 1.0\n","\n","\n","\n","Epoch: 2 val \n","Loss: 0.2076 Acc: 87.5000\n","benign precision: 100.0000 recall: 71.4286\n","benign sensitivity: 71.4286 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 81.8182\n","benign TP: 5.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 2.0\n","malignant precision: 81.8182 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 71.4286\n","malignant FPR: 28.5714 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 5.0\n","malignant FP: 2.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 3/50\n","----------\n","\n","Epoch: 3 train \n","Loss: 0.0412 Acc: 89.8551\n","benign precision: 100.0000 recall: 93.1034\n","benign sensitivity: 93.1034 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 94.5946\n","benign TP: 27.0\n","benign TN: 35.0\n","benign FP: 0.0\n","benign FN: 2.0\n","malignant precision: 94.5946 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 93.1034\n","malignant FPR: 6.8966 NPV: 100.0000\n","malignant TP: 35.0\n","malignant TN: 27.0\n","malignant FP: 2.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 3 val \n","Loss: 0.0047 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n",
"[Epochs 4-35 elided: metrics plateaued, with every epoch reporting train Acc 92.7536 (loss at or below 0.0056, all reported FP/FN at 0.0) and val Acc 100.0000 (loss at or below 0.0107)]\n","Epoch 36/50\n","----------\n","\n","Epoch: 36 train \n","Loss: 0.0002 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 28.0\n","benign TN: 36.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 
100.0000\n","malignant TP: 36.0\n","malignant TN: 28.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 36 val \n","Loss: 0.0024 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 37/50\n","----------\n","\n","Epoch: 37 train \n","Loss: 0.0002 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 29.0\n","benign TN: 35.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 35.0\n","malignant TN: 29.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 37 val \n","Loss: 0.0024 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 38/50\n","----------\n","\n","Epoch: 38 train \n","Loss: 0.0003 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 37.0\n","malignant TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 38 val \n","Loss: 0.0024 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 39/50\n","----------\n","\n","Epoch: 39 train \n","Loss: 0.0004 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 37.0\n","malignant TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 39 val \n","Loss: 0.0024 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 
7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 40/50\n","----------\n","\n","Epoch: 40 train \n","Loss: 0.0003 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 29.0\n","benign TN: 35.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 35.0\n","malignant TN: 29.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 40 val \n","Loss: 0.0023 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 41/50\n","----------\n","\n","Epoch: 41 train \n","Loss: 0.0002 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 37.0\n","malignant TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 41 val \n","Loss: 0.0023 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 42/50\n","----------\n","\n","Epoch: 42 train \n","Loss: 0.0002 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 37.0\n","malignant TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 42 val \n","Loss: 0.0023 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 
43/50\n","----------\n","\n","Epoch: 43 train \n","Loss: 0.0002 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 28.0\n","benign TN: 36.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 36.0\n","malignant TN: 28.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 43 val \n","Loss: 0.0023 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 44/50\n","----------\n","\n","Epoch: 44 train \n","Loss: 0.0002 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 30.0\n","benign TN: 34.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 34.0\n","malignant TN: 30.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 44 val \n","Loss: 0.0023 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 45/50\n","----------\n","\n","Epoch: 45 train \n","Loss: 0.0002 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 37.0\n","malignant TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 45 val \n","Loss: 0.0023 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 46/50\n","----------\n","\n","Epoch: 46 train \n","Loss: 0.0003 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 29.0\n","benign TN: 35.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 
100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 35.0\n","malignant TN: 29.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 46 val \n","Loss: 0.0023 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 47/50\n","----------\n","\n","Epoch: 47 train \n","Loss: 0.0002 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 28.0\n","benign TN: 36.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 36.0\n","malignant TN: 28.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 47 val \n","Loss: 0.0023 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 48/50\n","----------\n","\n","Epoch: 48 train \n","Loss: 0.0003 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 29.0\n","benign TN: 35.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 35.0\n","malignant TN: 29.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 48 val \n","Loss: 0.0023 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 49/50\n","----------\n","\n","Epoch: 49 train \n","Loss: 0.0003 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 28.0\n","benign TN: 36.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 36.0\n","malignant TN: 28.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 49 val \n","Loss: 0.0023 Acc: 100.0000\n","benign precision: 100.0000 recall: 
100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 50/50\n","----------\n","\n","Epoch: 50 train \n","Loss: 0.0003 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 37.0\n","malignant TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 50 val \n","Loss: 0.0023 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Training complete in 0m 53s\n","Best epoch idx: 50\n","Best epoch train Acc: 92.753623\n","Best epoch val Acc: 100.000000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","model trained by GPU (idx:0) has been saved at /home/Pathology_Experiment/saved_models/CLS_ViT_base_timm_401_lf15_finetuning_warwick_CLS.pth\n"]}],"source":["!python Train.py --edge_size 224 --data_augmentation_mode 3 --lr 1e-05 --lrf 0.15 --enable_tensorboard --model_idx ViT_base_timm_401_lf15_finetuning_warwick_CLS --dataroot /data/Pathology_Experiment/dataset/warwick_CLS --draw_root /home/Pathology_Experiment/runs/ViT_finetuning_with_timm --model_path /home/Pathology_Experiment/saved_models"]},{"cell_type":"markdown","metadata":{"id":"Di0tovUQbPb1"},"source":["Test"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"background_save":true},"id":"3HeuBzgsbQJ6"},"outputs":[{"name":"stdout","output_type":"stream","text":["class_names: ['benign', 'malignant']\n","['convit_base',\n"," 'convit_small',\n"," 'convit_tiny',\n"," 'crossvit_9_240',\n"," 'crossvit_9_dagger_240',\n"," 'crossvit_15_240',\n"," 'crossvit_15_dagger_240',\n"," 'crossvit_15_dagger_408',\n"," 'crossvit_18_240',\n"," 'crossvit_18_dagger_240',\n"," 'crossvit_18_dagger_408',\n"," 'crossvit_base_240',\n"," 'crossvit_small_240',\n"," 'crossvit_tiny_240',\n"," 'levit_128',\n"," 'levit_128s',\n"," 'levit_192',\n"," 'levit_256',\n"," 'levit_384',\n"," 'vit_base_patch8_224',\n"," 'vit_base_patch8_224_in21k',\n"," 'vit_base_patch16_224',\n"," 'vit_base_patch16_224_in21k',\n"," 'vit_base_patch16_224_miil',\n"," 'vit_base_patch16_224_miil_in21k',\n"," 'vit_base_patch16_384',\n"," 'vit_base_patch16_sam_224',\n"," 'vit_base_patch32_224',\n"," 'vit_base_patch32_224_in21k',\n"," 'vit_base_patch32_384',\n"," 
'vit_base_patch32_sam_224',\n"," 'vit_base_r26_s32_224',\n"," 'vit_base_r50_s16_224',\n"," 'vit_base_r50_s16_224_in21k',\n"," 'vit_base_r50_s16_384',\n"," 'vit_base_resnet26d_224',\n"," 'vit_base_resnet50_224_in21k',\n"," 'vit_base_resnet50_384',\n"," 'vit_base_resnet50d_224',\n"," 'vit_giant_patch14_224',\n"," 'vit_gigantic_patch14_224',\n"," 'vit_huge_patch14_224',\n"," 'vit_huge_patch14_224_in21k',\n"," 'vit_large_patch16_224',\n"," 'vit_large_patch16_224_in21k',\n"," 'vit_large_patch16_384',\n"," 'vit_large_patch32_224',\n"," 'vit_large_patch32_224_in21k',\n"," 'vit_large_patch32_384',\n"," 'vit_large_r50_s32_224',\n"," 'vit_large_r50_s32_224_in21k',\n"," 'vit_large_r50_s32_384',\n"," 'vit_small_patch16_224',\n"," 'vit_small_patch16_224_in21k',\n"," 'vit_small_patch16_384',\n"," 'vit_small_patch32_224',\n"," 'vit_small_patch32_224_in21k',\n"," 'vit_small_patch32_384',\n"," 'vit_small_r26_s32_224',\n"," 'vit_small_r26_s32_224_in21k',\n"," 'vit_small_r26_s32_384',\n"," 'vit_small_resnet26d_224',\n"," 'vit_small_resnet50d_s16_224',\n"," 'vit_tiny_patch16_224',\n"," 'vit_tiny_patch16_224_in21k',\n"," 'vit_tiny_patch16_384',\n"," 'vit_tiny_r_s16_p8_224',\n"," 'vit_tiny_r_s16_p8_224_in21k',\n"," 'vit_tiny_r_s16_p8_384']\n","test model output: tensor([[-0.2566, -0.2077]], grad_fn=\u003cAddmmBackward0\u003e)\n","model is ready now!\n","model loaded\n","model : ViT_base_timm_401_lf15_finetuning_warwick_CLS\n","*********************************setting*************************************\n","Namespace(model_idx='ViT_base_timm_401_lf15_finetuning_warwick_CLS', drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, cls_token_off=False, pos_embedding_off=False, att_module='SimAM', gpu_idx=0, dataroot='/data/Pathology_Experiment/dataset/warwick_CLS', model_path='/home/Pathology_Experiment/saved_models', draw_root='/home/Pathology_Experiment/runs/ViT_finetuning_with_timm', model_path_by_hand=None, paint=True, enable_tensorboard=False, enable_attention_check=False, enable_visualize_check=False, data_augmentation_mode=3, PromptTuning=None, Prompt_Token_num=20, PromptUnFreeze=False, Pre_Trained_model_path=None, num_classes=0, edge_size=224, batch_size=1, check_minibatch=None)\n","Epoch: Test\n","----------\n","Epoch: test test index of 4 minibatch: 1 time used: 2.0843310356140137\n","minibatch AVG loss: 0.049242537235841155\n","Epoch: test test index of 4 minibatch: 2 time used: 0.04948902130126953\n","minibatch AVG loss: 0.0026592123062982864\n","Epoch: test test index of 4 minibatch: 3 time used: 0.049538612365722656\n","minibatch AVG loss: 7.384463651760598e-05\n","Epoch: test test index of 4 minibatch: 4 time used: 0.04914069175720215\n","minibatch AVG loss: 0.03960687841677668\n","Epoch: test test index of 4 minibatch: 5 time used: 0.04950857162475586\n","minibatch AVG loss: 0.20808867284495136\n","Epoch: test test index of 4 minibatch: 6 time used: 0.04882478713989258\n","minibatch AVG loss: 0.00309721480675762\n","Epoch: test test index of 4 minibatch: 7 time used: 0.05091118812561035\n","minibatch AVG loss: 0.09648202347295864\n","Epoch: test test index of 4 minibatch: 8 time used: 0.047635555267333984\n","minibatch AVG loss: 0.000145436038110347\n","Epoch: test test index of 4 minibatch: 9 time used: 0.04693150520324707\n","minibatch AVG loss: 0.0013968248640594538\n","Epoch: test test index of 4 minibatch: 10 time used: 0.04672741889953613\n","minibatch AVG loss: 0.001999404456000775\n","Epoch: test test index of 4 minibatch: 11 time used: 0.04696464538574219\n","minibatch AVG loss: 
0.012991284020245075\n","Epoch: test test index of 4 minibatch: 12 time used: 0.04732871055603027\n","minibatch AVG loss: 0.0032983069049805636\n","Epoch: test test index of 4 minibatch: 13 time used: 0.04779481887817383\n","minibatch AVG loss: 0.0027486889557621907\n","Epoch: test test index of 4 minibatch: 14 time used: 0.04719042778015137\n","minibatch AVG loss: 0.002299645588209387\n","Epoch: test test index of 4 minibatch: 15 time used: 0.04710078239440918\n","minibatch AVG loss: 0.0003831974645436276\n","Epoch: test test index of 4 minibatch: 16 time used: 0.04697871208190918\n","minibatch AVG loss: 0.0005818266072310507\n","Epoch: test test index of 4 minibatch: 17 time used: 0.048592329025268555\n","minibatch AVG loss: 0.0008116918361338321\n","Epoch: test test index of 4 minibatch: 18 time used: 0.048664093017578125\n","minibatch AVG loss: 0.012041694470099173\n","Epoch: test test index of 4 minibatch: 19 time used: 0.04766678810119629\n","minibatch AVG loss: 6.261242378968745e-05\n","Epoch: test test index of 4 minibatch: 20 time used: 0.0470576286315918\n","minibatch AVG loss: 0.022635395129327662\n","\n","Epoch: test \n","Loss: 0.0230 Acc: 98.7500\n","benign precision: 100.0000 recall: 97.2973\n","benign sensitivity: 97.2973 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 97.7273\n","benign TP: 36.0\n","benign TN: 43.0\n","benign FP: 0.0\n","benign FN: 1.0\n","malignant precision: 97.7273 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 97.2973\n","malignant FPR: 2.7027 NPV: 100.0000\n","malignant TP: 43.0\n","malignant TN: 36.0\n","malignant FP: 1.0\n","malignant FN: 0.0\n","\n","\n","Testing complete in 0m 3s\n"]}],"source":["!python Test.py --edge_size 224 --data_augmentation_mode 3 --model_idx ViT_base_timm_401_lf15_finetuning_warwick_CLS --dataroot /data/Pathology_Experiment/dataset/warwick_CLS --draw_root /home/Pathology_Experiment/runs/ViT_finetuning_with_timm --model_path /home/Pathology_Experiment/saved_models"]},{"cell_type":"markdown","metadata":{"id":"I4siigu52Od8"},"source":["### VPT + finetuning (with timm weight)"]},{"cell_type":"markdown","metadata":{"id":"j8tEUJf22XL-"},"source":["Train"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"background_save":true},"id":"3m2pR6ba2QWf"},"outputs":[{"name":"stdout","output_type":"stream","text":["class_names: ['benign', 'malignant']\n","*********************************setting*************************************\n","Namespace(model_idx='ViT_base_timm_PromptDeep_20_401_lf15_finetuning_warwick_CLS', drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, cls_token_off=False, pos_embedding_off=False, att_module='SimAM', backbone_PT_off=False, gpu_idx=-1, dataroot='/data/Pathology_Experiment/dataset/warwick_CLS', model_path='/home/Pathology_Experiment/saved_models', draw_root='/home/Pathology_Experiment/runs/VPT_finetuning_with_timm', paint=True, enable_tensorboard=True, enable_attention_check=False, enable_visualize_check=False, PromptTuning='Deep', Prompt_Token_num=20, PromptUnFreeze=False, linearprobing=False, Pre_Trained_model_path=None, Prompt_state_path=None, enable_sam=False, augmentation_name=None, ratio_strategy=None, patch_strategy=None, loss_drive_threshold=4.0, fix_position_ratio=0.5, fix_patch_size=None, patch_size_jump=None, num_classes=0, edge_size=224, data_augmentation_mode=3, batch_size=8, num_epochs=50, intake_epochs=0, lr=1e-05, lrf=0.15, opt_name='Adam', check_minibatch=None, num_workers=2)\n","we dont have more GPU idx here, try to use gpu_idx=0\n","PromptTuning of 
ViT_base_timm_PromptDeep_20_401_lf15_finetuning_warwick_CLS\n","Prompt VPT type: Deep\n","backbone base_state_dict of timm\n","prompting with empty prompt_state: prompt_state of None\n","in prompt model building, timm ViT loaded for base_state_dict\n","model forward cheacked\n","model is ready now!\n","GPU: 0\n","----------------------------------------------------------------\n"," Layer (type) Output Shape Param #\n","================================================================\n"," Conv2d-1 [-1, 768, 14, 14] 590,592\n"," Identity-2 [-1, 196, 768] 0\n"," PatchEmbed-3 [-1, 196, 768] 0\n"," Dropout-4 [-1, 197, 768] 0\n"," LayerNorm-5 [-1, 217, 768] 1,536\n"," Linear-6 [-1, 217, 2304] 1,771,776\n"," Dropout-7 [-1, 12, 217, 217] 0\n"," Linear-8 [-1, 217, 768] 590,592\n"," Dropout-9 [-1, 217, 768] 0\n"," Attention-10 [-1, 217, 768] 0\n"," Identity-11 [-1, 217, 768] 0\n"," LayerNorm-12 [-1, 217, 768] 1,536\n"," Linear-13 [-1, 217, 3072] 2,362,368\n"," GELU-14 [-1, 217, 3072] 0\n"," Dropout-15 [-1, 217, 3072] 0\n"," Linear-16 [-1, 217, 768] 2,360,064\n"," Dropout-17 [-1, 217, 768] 0\n"," Mlp-18 [-1, 217, 768] 0\n"," Identity-19 [-1, 217, 768] 0\n"," Block-20 [-1, 217, 768] 0\n"," LayerNorm-21 [-1, 217, 768] 1,536\n"," Linear-22 [-1, 217, 2304] 1,771,776\n"," Dropout-23 [-1, 12, 217, 217] 0\n"," Linear-24 [-1, 217, 768] 590,592\n"," Dropout-25 [-1, 217, 768] 0\n"," Attention-26 [-1, 217, 768] 0\n"," Identity-27 [-1, 217, 768] 0\n"," LayerNorm-28 [-1, 217, 768] 1,536\n"," Linear-29 [-1, 217, 3072] 2,362,368\n"," GELU-30 [-1, 217, 3072] 0\n"," Dropout-31 [-1, 217, 3072] 0\n"," Linear-32 [-1, 217, 768] 2,360,064\n"," Dropout-33 [-1, 217, 768] 0\n"," Mlp-34 [-1, 217, 768] 0\n"," Identity-35 [-1, 217, 768] 0\n"," Block-36 [-1, 217, 768] 0\n"," LayerNorm-37 [-1, 217, 768] 1,536\n"," Linear-38 [-1, 217, 2304] 1,771,776\n"," Dropout-39 [-1, 12, 217, 217] 0\n"," Linear-40 [-1, 217, 768] 590,592\n"," Dropout-41 [-1, 217, 768] 0\n"," Attention-42 [-1, 217, 768] 0\n"," Identity-43 [-1, 217, 768] 0\n"," LayerNorm-44 [-1, 217, 768] 1,536\n"," Linear-45 [-1, 217, 3072] 2,362,368\n"," GELU-46 [-1, 217, 3072] 0\n"," Dropout-47 [-1, 217, 3072] 0\n"," Linear-48 [-1, 217, 768] 2,360,064\n"," Dropout-49 [-1, 217, 768] 0\n"," Mlp-50 [-1, 217, 768] 0\n"," Identity-51 [-1, 217, 768] 0\n"," Block-52 [-1, 217, 768] 0\n"," LayerNorm-53 [-1, 217, 768] 1,536\n"," Linear-54 [-1, 217, 2304] 1,771,776\n"," Dropout-55 [-1, 12, 217, 217] 0\n"," Linear-56 [-1, 217, 768] 590,592\n"," Dropout-57 [-1, 217, 768] 0\n"," Attention-58 [-1, 217, 768] 0\n"," Identity-59 [-1, 217, 768] 0\n"," LayerNorm-60 [-1, 217, 768] 1,536\n"," Linear-61 [-1, 217, 3072] 2,362,368\n"," GELU-62 [-1, 217, 3072] 0\n"," Dropout-63 [-1, 217, 3072] 0\n"," Linear-64 [-1, 217, 768] 2,360,064\n"," Dropout-65 [-1, 217, 768] 0\n"," Mlp-66 [-1, 217, 768] 0\n"," Identity-67 [-1, 217, 768] 0\n"," Block-68 [-1, 217, 768] 0\n"," LayerNorm-69 [-1, 217, 768] 1,536\n"," Linear-70 [-1, 217, 2304] 1,771,776\n"," Dropout-71 [-1, 12, 217, 217] 0\n"," Linear-72 [-1, 217, 768] 590,592\n"," Dropout-73 [-1, 217, 768] 0\n"," Attention-74 [-1, 217, 768] 0\n"," Identity-75 [-1, 217, 768] 0\n"," LayerNorm-76 [-1, 217, 768] 1,536\n"," Linear-77 [-1, 217, 3072] 2,362,368\n"," GELU-78 [-1, 217, 3072] 0\n"," Dropout-79 [-1, 217, 3072] 0\n"," Linear-80 [-1, 217, 768] 2,360,064\n"," Dropout-81 [-1, 217, 768] 0\n"," Mlp-82 [-1, 217, 768] 0\n"," Identity-83 [-1, 217, 768] 0\n"," Block-84 [-1, 217, 768] 0\n"," LayerNorm-85 [-1, 217, 768] 1,536\n"," Linear-86 [-1, 217, 2304] 1,771,776\n"," 
Dropout-87 [-1, 12, 217, 217] 0\n"," Linear-88 [-1, 217, 768] 590,592\n"," Dropout-89 [-1, 217, 768] 0\n"," Attention-90 [-1, 217, 768] 0\n"," Identity-91 [-1, 217, 768] 0\n"," LayerNorm-92 [-1, 217, 768] 1,536\n"," Linear-93 [-1, 217, 3072] 2,362,368\n"," GELU-94 [-1, 217, 3072] 0\n"," Dropout-95 [-1, 217, 3072] 0\n"," Linear-96 [-1, 217, 768] 2,360,064\n"," Dropout-97 [-1, 217, 768] 0\n"," Mlp-98 [-1, 217, 768] 0\n"," Identity-99 [-1, 217, 768] 0\n"," Block-100 [-1, 217, 768] 0\n"," LayerNorm-101 [-1, 217, 768] 1,536\n"," Linear-102 [-1, 217, 2304] 1,771,776\n"," Dropout-103 [-1, 12, 217, 217] 0\n"," Linear-104 [-1, 217, 768] 590,592\n"," Dropout-105 [-1, 217, 768] 0\n"," Attention-106 [-1, 217, 768] 0\n"," Identity-107 [-1, 217, 768] 0\n"," LayerNorm-108 [-1, 217, 768] 1,536\n"," Linear-109 [-1, 217, 3072] 2,362,368\n"," GELU-110 [-1, 217, 3072] 0\n"," Dropout-111 [-1, 217, 3072] 0\n"," Linear-112 [-1, 217, 768] 2,360,064\n"," Dropout-113 [-1, 217, 768] 0\n"," Mlp-114 [-1, 217, 768] 0\n"," Identity-115 [-1, 217, 768] 0\n"," Block-116 [-1, 217, 768] 0\n"," LayerNorm-117 [-1, 217, 768] 1,536\n"," Linear-118 [-1, 217, 2304] 1,771,776\n"," Dropout-119 [-1, 12, 217, 217] 0\n"," Linear-120 [-1, 217, 768] 590,592\n"," Dropout-121 [-1, 217, 768] 0\n"," Attention-122 [-1, 217, 768] 0\n"," Identity-123 [-1, 217, 768] 0\n"," LayerNorm-124 [-1, 217, 768] 1,536\n"," Linear-125 [-1, 217, 3072] 2,362,368\n"," GELU-126 [-1, 217, 3072] 0\n"," Dropout-127 [-1, 217, 3072] 0\n"," Linear-128 [-1, 217, 768] 2,360,064\n"," Dropout-129 [-1, 217, 768] 0\n"," Mlp-130 [-1, 217, 768] 0\n"," Identity-131 [-1, 217, 768] 0\n"," Block-132 [-1, 217, 768] 0\n"," LayerNorm-133 [-1, 217, 768] 1,536\n"," Linear-134 [-1, 217, 2304] 1,771,776\n"," Dropout-135 [-1, 12, 217, 217] 0\n"," Linear-136 [-1, 217, 768] 590,592\n"," Dropout-137 [-1, 217, 768] 0\n"," Attention-138 [-1, 217, 768] 0\n"," Identity-139 [-1, 217, 768] 0\n"," LayerNorm-140 [-1, 217, 768] 1,536\n"," Linear-141 [-1, 217, 3072] 2,362,368\n"," GELU-142 [-1, 217, 3072] 0\n"," Dropout-143 [-1, 217, 3072] 0\n"," Linear-144 [-1, 217, 768] 2,360,064\n"," Dropout-145 [-1, 217, 768] 0\n"," Mlp-146 [-1, 217, 768] 0\n"," Identity-147 [-1, 217, 768] 0\n"," Block-148 [-1, 217, 768] 0\n"," LayerNorm-149 [-1, 217, 768] 1,536\n"," Linear-150 [-1, 217, 2304] 1,771,776\n"," Dropout-151 [-1, 12, 217, 217] 0\n"," Linear-152 [-1, 217, 768] 590,592\n"," Dropout-153 [-1, 217, 768] 0\n"," Attention-154 [-1, 217, 768] 0\n"," Identity-155 [-1, 217, 768] 0\n"," LayerNorm-156 [-1, 217, 768] 1,536\n"," Linear-157 [-1, 217, 3072] 2,362,368\n"," GELU-158 [-1, 217, 3072] 0\n"," Dropout-159 [-1, 217, 3072] 0\n"," Linear-160 [-1, 217, 768] 2,360,064\n"," Dropout-161 [-1, 217, 768] 0\n"," Mlp-162 [-1, 217, 768] 0\n"," Identity-163 [-1, 217, 768] 0\n"," Block-164 [-1, 217, 768] 0\n"," LayerNorm-165 [-1, 217, 768] 1,536\n"," Linear-166 [-1, 217, 2304] 1,771,776\n"," Dropout-167 [-1, 12, 217, 217] 0\n"," Linear-168 [-1, 217, 768] 590,592\n"," Dropout-169 [-1, 217, 768] 0\n"," Attention-170 [-1, 217, 768] 0\n"," Identity-171 [-1, 217, 768] 0\n"," LayerNorm-172 [-1, 217, 768] 1,536\n"," Linear-173 [-1, 217, 3072] 2,362,368\n"," GELU-174 [-1, 217, 3072] 0\n"," Dropout-175 [-1, 217, 3072] 0\n"," Linear-176 [-1, 217, 768] 2,360,064\n"," Dropout-177 [-1, 217, 768] 0\n"," Mlp-178 [-1, 217, 768] 0\n"," Identity-179 [-1, 217, 768] 0\n"," Block-180 [-1, 217, 768] 0\n"," LayerNorm-181 [-1, 217, 768] 1,536\n"," Linear-182 [-1, 217, 2304] 1,771,776\n"," Dropout-183 [-1, 12, 217, 217] 0\n"," Linear-184 [-1, 
217, 768] 590,592\n"," Dropout-185 [-1, 217, 768] 0\n"," Attention-186 [-1, 217, 768] 0\n"," Identity-187 [-1, 217, 768] 0\n"," LayerNorm-188 [-1, 217, 768] 1,536\n"," Linear-189 [-1, 217, 3072] 2,362,368\n"," GELU-190 [-1, 217, 3072] 0\n"," Dropout-191 [-1, 217, 3072] 0\n"," Linear-192 [-1, 217, 768] 2,360,064\n"," Dropout-193 [-1, 217, 768] 0\n"," Mlp-194 [-1, 217, 768] 0\n"," Identity-195 [-1, 217, 768] 0\n"," Block-196 [-1, 217, 768] 0\n"," LayerNorm-197 [-1, 197, 768] 1,536\n"," Identity-198 [-1, 768] 0\n"," Linear-199 [-1, 2] 1,538\n","================================================================\n","Total params: 85,648,130\n","Trainable params: 1,538\n","Non-trainable params: 85,646,592\n","----------------------------------------------------------------\n","Input size (MB): 0.57\n","Forward/backward pass size (MB): 454.20\n","Params size (MB): 326.72\n","Estimated Total Size (MB): 781.49\n","----------------------------------------------------------------\n","model : ViT_base_timm_PromptDeep_20_401_lf15_finetuning_warwick_CLS\n","no valid counterparts augmentation selected\n","Epoch 1/50\n","----------\n","\n","Epoch: 1 train \n","Loss: 0.7798 Acc: 55.0725\n","benign precision: 55.5556 recall: 51.7241\n","benign sensitivity: 51.7241 specificity: 65.7143\n","benign FPR: 34.2857 NPV: 62.1622\n","benign TP: 15.0\n","benign TN: 23.0\n","benign FP: 12.0\n","benign FN: 14.0\n","malignant precision: 62.1622 recall: 65.7143\n","malignant sensitivity: 65.7143 specificity: 51.7241\n","malignant FPR: 48.2759 NPV: 55.5556\n","malignant TP: 23.0\n","malignant TN: 15.0\n","malignant FP: 14.0\n","malignant FN: 12.0\n","\n","\n","\n","Epoch: 1 val \n","Loss: 0.6996 Acc: 62.5000\n","benign precision: 60.0000 recall: 42.8571\n","benign sensitivity: 42.8571 specificity: 77.7778\n","benign FPR: 22.2222 NPV: 63.6364\n","benign TP: 3.0\n","benign TN: 7.0\n","benign FP: 2.0\n","benign FN: 4.0\n","malignant precision: 63.6364 recall: 77.7778\n","malignant sensitivity: 77.7778 specificity: 42.8571\n","malignant FPR: 57.1429 NPV: 60.0000\n","malignant TP: 7.0\n","malignant TN: 3.0\n","malignant FP: 4.0\n","malignant FN: 2.0\n","\n","\n","\n","Epoch 2/50\n","----------\n","\n","Epoch: 2 train \n","Loss: 0.5416 Acc: 62.3188\n","benign precision: 60.8696 recall: 53.8462\n","benign sensitivity: 53.8462 specificity: 76.3158\n","benign FPR: 23.6842 NPV: 70.7317\n","benign TP: 14.0\n","benign TN: 29.0\n","benign FP: 9.0\n","benign FN: 12.0\n","malignant precision: 70.7317 recall: 76.3158\n","malignant sensitivity: 76.3158 specificity: 53.8462\n","malignant FPR: 46.1538 NPV: 60.8696\n","malignant TP: 29.0\n","malignant TN: 14.0\n","malignant FP: 12.0\n","malignant FN: 9.0\n","\n","\n","\n","Epoch: 2 val \n","Loss: 0.5944 Acc: 75.0000\n","benign precision: 80.0000 recall: 57.1429\n","benign sensitivity: 57.1429 specificity: 88.8889\n","benign FPR: 11.1111 NPV: 72.7273\n","benign TP: 4.0\n","benign TN: 8.0\n","benign FP: 1.0\n","benign FN: 3.0\n","malignant precision: 72.7273 recall: 88.8889\n","malignant sensitivity: 88.8889 specificity: 57.1429\n","malignant FPR: 42.8571 NPV: 80.0000\n","malignant TP: 8.0\n","malignant TN: 4.0\n","malignant FP: 3.0\n","malignant FN: 1.0\n","\n","\n","\n","Epoch 3/50\n","----------\n","\n","Epoch: 3 train \n","Loss: 0.4691 Acc: 71.0145\n","benign precision: 78.9474 recall: 57.6923\n","benign sensitivity: 57.6923 specificity: 89.4737\n","benign FPR: 10.5263 NPV: 75.5556\n","benign TP: 15.0\n","benign TN: 34.0\n","benign FP: 4.0\n","benign FN: 11.0\n","malignant precision: 75.5556 
recall: 89.4737\n","malignant sensitivity: 89.4737 specificity: 57.6923\n","malignant FPR: 42.3077 NPV: 78.9474\n","malignant TP: 34.0\n","malignant TN: 15.0\n","malignant FP: 11.0\n","malignant FN: 4.0\n","\n","\n","\n","Epoch: 3 val \n","Loss: 0.5269 Acc: 81.2500\n","benign precision: 100.0000 recall: 57.1429\n","benign sensitivity: 57.1429 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 75.0000\n","benign TP: 4.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 3.0\n","malignant precision: 75.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 57.1429\n","malignant FPR: 42.8571 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 4.0\n","malignant FP: 3.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 4/50\n","----------\n","\n","Epoch: 4 train \n","Loss: 0.3390 Acc: 81.1594\n","benign precision: 88.8889 recall: 82.7586\n","benign sensitivity: 82.7586 specificity: 91.4286\n","benign FPR: 8.5714 NPV: 86.4865\n","benign TP: 24.0\n","benign TN: 32.0\n","benign FP: 3.0\n","benign FN: 5.0\n","malignant precision: 86.4865 recall: 91.4286\n","malignant sensitivity: 91.4286 specificity: 82.7586\n","malignant FPR: 17.2414 NPV: 88.8889\n","malignant TP: 32.0\n","malignant TN: 24.0\n","malignant FP: 5.0\n","malignant FN: 3.0\n","\n","\n","\n","Epoch: 4 val \n","Loss: 0.4431 Acc: 81.2500\n","benign precision: 100.0000 recall: 57.1429\n","benign sensitivity: 57.1429 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 75.0000\n","benign TP: 4.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 3.0\n","malignant precision: 75.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 57.1429\n","malignant FPR: 42.8571 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 4.0\n","malignant FP: 3.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 5/50\n","----------\n","\n","Epoch: 5 train \n","Loss: 0.2642 Acc: 84.0580\n","benign precision: 88.8889 recall: 88.8889\n","benign sensitivity: 88.8889 specificity: 91.8919\n","benign FPR: 8.1081 NPV: 91.8919\n","benign TP: 24.0\n","benign TN: 34.0\n","benign FP: 3.0\n","benign FN: 3.0\n","malignant precision: 91.8919 recall: 91.8919\n","malignant sensitivity: 91.8919 specificity: 88.8889\n","malignant FPR: 11.1111 NPV: 88.8889\n","malignant TP: 34.0\n","malignant TN: 24.0\n","malignant FP: 3.0\n","malignant FN: 3.0\n","\n","\n","\n","Epoch: 5 val \n","Loss: 0.4339 Acc: 81.2500\n","benign precision: 100.0000 recall: 57.1429\n","benign sensitivity: 57.1429 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 75.0000\n","benign TP: 4.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 3.0\n","malignant precision: 75.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 57.1429\n","malignant FPR: 42.8571 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 4.0\n","malignant FP: 3.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 6/50\n","----------\n","\n","Epoch: 6 train \n","Loss: 0.2274 Acc: 88.4058\n","benign precision: 100.0000 recall: 88.4615\n","benign sensitivity: 88.4615 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 92.6829\n","benign TP: 23.0\n","benign TN: 38.0\n","benign FP: 0.0\n","benign FN: 3.0\n","malignant precision: 92.6829 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 88.4615\n","malignant FPR: 11.5385 NPV: 100.0000\n","malignant TP: 38.0\n","malignant TN: 23.0\n","malignant FP: 3.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 6 val \n","Loss: 0.3520 Acc: 81.2500\n","benign precision: 100.0000 recall: 57.1429\n","benign sensitivity: 57.1429 specificity: 
100.0000\n","benign FPR: 0.0000 NPV: 75.0000\n","benign TP: 4.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 3.0\n","malignant precision: 75.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 57.1429\n","malignant FPR: 42.8571 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 4.0\n","malignant FP: 3.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 7/50\n","----------\n","\n","Epoch: 7 train \n","Loss: 0.2065 Acc: 88.4058\n","benign precision: 96.1538 recall: 92.5926\n","benign sensitivity: 92.5926 specificity: 97.2973\n","benign FPR: 2.7027 NPV: 94.7368\n","benign TP: 25.0\n","benign TN: 36.0\n","benign FP: 1.0\n","benign FN: 2.0\n","malignant precision: 94.7368 recall: 97.2973\n","malignant sensitivity: 97.2973 specificity: 92.5926\n","malignant FPR: 7.4074 NPV: 96.1538\n","malignant TP: 36.0\n","malignant TN: 25.0\n","malignant FP: 2.0\n","malignant FN: 1.0\n","\n","\n","\n","Epoch: 7 val \n","Loss: 0.2767 Acc: 81.2500\n","benign precision: 100.0000 recall: 57.1429\n","benign sensitivity: 57.1429 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 75.0000\n","benign TP: 4.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 3.0\n","malignant precision: 75.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 57.1429\n","malignant FPR: 42.8571 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 4.0\n","malignant FP: 3.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 8/50\n","----------\n","\n","Epoch: 8 train \n","Loss: 0.1894 Acc: 88.4058\n","benign precision: 100.0000 recall: 90.0000\n","benign sensitivity: 90.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 91.8919\n","benign TP: 27.0\n","benign TN: 34.0\n","benign FP: 0.0\n","benign FN: 3.0\n","malignant precision: 91.8919 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 90.0000\n","malignant FPR: 10.0000 NPV: 100.0000\n","malignant TP: 34.0\n","malignant TN: 27.0\n","malignant FP: 3.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 8 val \n","Loss: 0.3026 Acc: 81.2500\n","benign precision: 100.0000 recall: 57.1429\n","benign sensitivity: 57.1429 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 75.0000\n","benign TP: 4.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 3.0\n","malignant precision: 75.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 57.1429\n","malignant FPR: 42.8571 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 4.0\n","malignant FP: 3.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 9/50\n","----------\n","\n","Epoch: 9 train \n","Loss: 0.1697 Acc: 88.4058\n","benign precision: 96.1538 recall: 92.5926\n","benign sensitivity: 92.5926 specificity: 97.2973\n","benign FPR: 2.7027 NPV: 94.7368\n","benign TP: 25.0\n","benign TN: 36.0\n","benign FP: 1.0\n","benign FN: 2.0\n","malignant precision: 94.7368 recall: 97.2973\n","malignant sensitivity: 97.2973 specificity: 92.5926\n","malignant FPR: 7.4074 NPV: 96.1538\n","malignant TP: 36.0\n","malignant TN: 25.0\n","malignant FP: 2.0\n","malignant FN: 1.0\n","\n","\n","\n","Epoch: 9 val \n","Loss: 0.1931 Acc: 93.7500\n","benign precision: 100.0000 recall: 85.7143\n","benign sensitivity: 85.7143 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 90.0000\n","benign TP: 6.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 1.0\n","malignant precision: 90.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 85.7143\n","malignant FPR: 14.2857 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 6.0\n","malignant FP: 1.0\n","malignant FN: 
0.0\n","\n","\n","\n","Epoch 10/50\n","----------\n","\n","Epoch: 10 train \n","Loss: 0.1084 Acc: 89.8551\n","benign precision: 96.4286 recall: 96.4286\n","benign sensitivity: 96.4286 specificity: 97.2222\n","benign FPR: 2.7778 NPV: 97.2222\n","benign TP: 27.0\n","benign TN: 35.0\n","benign FP: 1.0\n","benign FN: 1.0\n","malignant precision: 97.2222 recall: 97.2222\n","malignant sensitivity: 97.2222 specificity: 96.4286\n","malignant FPR: 3.5714 NPV: 96.4286\n","malignant TP: 35.0\n","malignant TN: 27.0\n","malignant FP: 1.0\n","malignant FN: 1.0\n","\n","\n","\n","Epoch: 10 val \n","Loss: 0.2491 Acc: 87.5000\n","benign precision: 100.0000 recall: 71.4286\n","benign sensitivity: 71.4286 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 81.8182\n","benign TP: 5.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 2.0\n","malignant precision: 81.8182 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 71.4286\n","malignant FPR: 28.5714 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 5.0\n","malignant FP: 2.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 11/50\n","----------\n","\n","Epoch: 11 train \n","Loss: 0.0966 Acc: 91.3043\n","benign precision: 100.0000 recall: 96.2963\n","benign sensitivity: 96.2963 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 97.3684\n","benign TP: 26.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 1.0\n","malignant precision: 97.3684 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 96.2963\n","malignant FPR: 3.7037 NPV: 100.0000\n","malignant TP: 37.0\n","malignant TN: 26.0\n","malignant FP: 1.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 11 val \n","Loss: 0.1859 Acc: 93.7500\n","benign precision: 100.0000 recall: 85.7143\n","benign sensitivity: 85.7143 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 90.0000\n","benign TP: 6.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 1.0\n","malignant precision: 90.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 85.7143\n","malignant FPR: 14.2857 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 6.0\n","malignant FP: 1.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 12/50\n","----------\n","\n","Epoch: 12 train \n","Loss: 0.0863 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 29.0\n","benign TN: 35.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 35.0\n","malignant TN: 29.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 12 val \n","Loss: 0.1140 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 13/50\n","----------\n","\n","Epoch: 13 train \n","Loss: 0.0857 Acc: 91.3043\n","benign precision: 100.0000 recall: 96.2963\n","benign sensitivity: 96.2963 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 97.3684\n","benign TP: 26.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 1.0\n","malignant precision: 
97.3684 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 96.2963\n","malignant FPR: 3.7037 NPV: 100.0000\n","malignant TP: 37.0\n","malignant TN: 26.0\n","malignant FP: 1.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 13 val \n","Loss: 0.1308 Acc: 93.7500\n","benign precision: 100.0000 recall: 85.7143\n","benign sensitivity: 85.7143 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 90.0000\n","benign TP: 6.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 1.0\n","malignant precision: 90.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 85.7143\n","malignant FPR: 14.2857 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 6.0\n","malignant FP: 1.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 14/50\n","----------\n","\n","Epoch: 14 train \n","Loss: 0.0749 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 37.0\n","malignant TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 14 val \n","Loss: 0.1053 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 15/50\n","----------\n","\n","Epoch: 15 train \n","Loss: 0.0503 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 26.0\n","benign TN: 38.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 38.0\n","malignant TN: 26.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 15 val \n","Loss: 0.1226 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 16/50\n","----------\n","\n","Epoch: 16 train \n","Loss: 0.0510 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 28.0\n","benign TN: 36.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 36.0\n","malignant TN: 28.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 16 val \n","Loss: 0.0832 Acc: 100.0000\n","benign precision: 100.0000 recall: 
100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 17/50\n","----------\n","\n","Epoch: 17 train \n","Loss: 0.0495 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 37.0\n","malignant TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 17 val \n","Loss: 0.0738 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 18/50\n","----------\n","\n","Epoch: 18 train \n","Loss: 0.0476 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 28.0\n","benign TN: 36.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 36.0\n","malignant TN: 28.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 18 val \n","Loss: 0.0692 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 19/50\n","----------\n","\n","Epoch: 19 train \n","Loss: 0.0376 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 37.0\n","malignant TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 19 val \n","Loss: 0.0681 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 
100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 20/50\n","----------\n","\n","Epoch: 20 train \n","Loss: 0.0319 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 37.0\n","malignant TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 20 val \n","Loss: 0.0694 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 21/50\n","----------\n","\n","Epoch: 21 train \n","Loss: 0.0264 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 37.0\n","malignant TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 21 val \n","Loss: 0.0643 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 22/50\n","----------\n","\n","Epoch: 22 train \n","Loss: 0.0258 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 28.0\n","benign TN: 36.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 36.0\n","malignant TN: 28.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 22 val \n","Loss: 0.0568 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 23/50\n","----------\n","\n","Epoch: 23 train \n","Loss: 0.0238 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 
0.0000 NPV: 100.0000\n","benign TP: 29.0\n","benign TN: 35.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 35.0\n","malignant TN: 29.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 23 val \n","Loss: 0.0567 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 24/50\n","----------\n","\n","Epoch: 24 train \n","Loss: 0.0212 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 28.0\n","benign TN: 36.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 36.0\n","malignant TN: 28.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 24 val \n","Loss: 0.0572 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 25/50\n","----------\n","\n","Epoch: 25 train \n","Loss: 0.0195 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 28.0\n","benign TN: 36.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 36.0\n","malignant TN: 28.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 25 val \n","Loss: 0.0650 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 26/50\n","----------\n","\n","Epoch: 26 train \n","Loss: 0.0256 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 30.0\n","benign TN: 34.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 34.0\n","malignant TN: 30.0\n","malignant FP: 
0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 26 val \n","Loss: 0.0390 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 27/50\n","----------\n","\n","Epoch: 27 train \n","Loss: 0.0197 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 26.0\n","benign TN: 38.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 38.0\n","malignant TN: 26.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 27 val \n","Loss: 0.0506 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 28/50\n","----------\n","\n","Epoch: 28 train \n","Loss: 0.0260 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 37.0\n","malignant TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 28 val \n","Loss: 0.0452 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 29/50\n","----------\n","\n","Epoch: 29 train \n","Loss: 0.0164 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 28.0\n","benign TN: 36.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 36.0\n","malignant TN: 28.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 29 val \n","Loss: 0.0368 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant 
precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 30/50\n","----------\n","\n","Epoch: 30 train \n","Loss: 0.0166 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 26.0\n","benign TN: 38.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 38.0\n","malignant TN: 26.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 30 val \n","Loss: 0.0475 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 31/50\n","----------\n","\n","Epoch: 31 train \n","Loss: 0.0150 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 26.0\n","benign TN: 38.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 38.0\n","malignant TN: 26.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 31 val \n","Loss: 0.0456 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 32/50\n","----------\n","\n","Epoch: 32 train \n","Loss: 0.0183 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 29.0\n","benign TN: 35.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 35.0\n","malignant TN: 29.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 32 val \n","Loss: 0.0325 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 33/50\n","----------\n","\n","Epoch: 33 train \n","Loss: 0.0151 Acc: 
92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 28.0\n","benign TN: 36.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 36.0\n","malignant TN: 28.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 33 val \n","Loss: 0.0345 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 34/50\n","----------\n","\n","Epoch: 34 train \n","Loss: 0.0171 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 29.0\n","benign TN: 35.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 35.0\n","malignant TN: 29.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 34 val \n","Loss: 0.0338 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 35/50\n","----------\n","\n","Epoch: 35 train \n","Loss: 0.0119 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 26.0\n","benign TN: 38.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 38.0\n","malignant TN: 26.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 35 val \n","Loss: 0.0363 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 36/50\n","----------\n","\n","Epoch: 36 train \n","Loss: 0.0127 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 29.0\n","benign TN: 35.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 
specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 35.0\n","malignant TN: 29.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 36 val \n","Loss: 0.0330 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 37/50\n","----------\n","\n","Epoch: 37 train \n","Loss: 0.0155 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 28.0\n","benign TN: 36.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 36.0\n","malignant TN: 28.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 37 val \n","Loss: 0.0273 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 38/50\n","----------\n","\n","Epoch: 38 train \n","Loss: 0.0097 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 28.0\n","benign TN: 36.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 36.0\n","malignant TN: 28.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 38 val \n","Loss: 0.0256 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 39/50\n","----------\n","\n","Epoch: 39 train \n","Loss: 0.0122 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 28.0\n","benign TN: 36.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 36.0\n","malignant TN: 28.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 39 val \n","Loss: 0.0268 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 
100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 40/50\n","----------\n","\n","Epoch: 40 train \n","Loss: 0.0106 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 28.0\n","benign TN: 36.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 36.0\n","malignant TN: 28.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 40 val \n","Loss: 0.0289 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 41/50\n","----------\n","\n","Epoch: 41 train \n","Loss: 0.0095 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 37.0\n","malignant TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 41 val \n","Loss: 0.0293 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 42/50\n","----------\n","\n","Epoch: 42 train \n","Loss: 0.0093 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 29.0\n","benign TN: 35.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 35.0\n","malignant TN: 29.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 42 val \n","Loss: 0.0299 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 
7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 43/50\n","----------\n","\n","Epoch: 43 train \n","Loss: 0.0066 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 37.0\n","malignant TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 43 val \n","Loss: 0.0290 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 44/50\n","----------\n","\n","Epoch: 44 train \n","Loss: 0.0105 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 37.0\n","malignant TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 44 val \n","Loss: 0.0268 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 45/50\n","----------\n","\n","Epoch: 45 train \n","Loss: 0.0096 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 37.0\n","malignant TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 45 val \n","Loss: 0.0287 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 46/50\n","----------\n","\n","Epoch: 46 train \n","Loss: 0.0093 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 28.0\n","benign 
TN: 36.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 36.0\n","malignant TN: 28.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 46 val \n","Loss: 0.0282 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 47/50\n","----------\n","\n","Epoch: 47 train \n","Loss: 0.0103 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 37.0\n","malignant TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 47 val \n","Loss: 0.0255 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 48/50\n","----------\n","\n","Epoch: 48 train \n","Loss: 0.0100 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 26.0\n","benign TN: 38.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 38.0\n","malignant TN: 26.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 48 val \n","Loss: 0.0256 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 49/50\n","----------\n","\n","Epoch: 49 train \n","Loss: 0.0074 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 28.0\n","benign TN: 36.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 36.0\n","malignant TN: 28.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 49 val 
\n","Loss: 0.0256 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 50/50\n","----------\n","\n","Epoch: 50 train \n","Loss: 0.0099 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 37.0\n","malignant TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 50 val \n","Loss: 0.0294 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Training complete in 0m 46s\n","Best epoch idx: 50\n","Best epoch train Acc: 92.753623\n","Best epoch val Acc: 100.000000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","model trained by GPU (idx:0) has been saved at /home/Pathology_Experiment/saved_models/CLS_ViT_base_timm_PromptDeep_20_401_lf15_finetuning_warwick_CLS.pth\n"]}],"source":["!python Train.py --edge_size 224 --data_augmentation_mode 3 --lr 1e-05 --lrf 0.15 --enable_tensorboard --model_idx ViT_base_timm_PromptDeep_20_401_lf15_finetuning_warwick_CLS --PromptTuning Deep --dataroot /data/Pathology_Experiment/dataset/warwick_CLS --draw_root /home/Pathology_Experiment/runs/VPT_finetuning_with_timm --model_path /home/Pathology_Experiment/saved_models"]},{"cell_type":"markdown","metadata":{"id":"q2mnI-UW2YFS"},"source":["Test"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"background_save":true},"id":"98WKJutR2QtY"},"outputs":[{"name":"stdout","output_type":"stream","text":["class_names: ['benign', 'malignant']\n","base_state_dict of timm\n","Test the PromptTuning of ViT_base_timm_PromptDeep_20_401_lf15_finetuning_warwick_CLS\n","Prompt VPT type: Deep\n","in prompt model building, timm ViT loaded for base_state_dict\n","model forward cheacked\n","model is ready now!\n","prompt head match\n","model loaded\n","model : ViT_base_timm_PromptDeep_20_401_lf15_finetuning_warwick_CLS\n","*********************************setting*************************************\n","Namespace(model_idx='ViT_base_timm_PromptDeep_20_401_lf15_finetuning_warwick_CLS', drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, cls_token_off=False, pos_embedding_off=False, att_module='SimAM', gpu_idx=0, 
dataroot='/data/Pathology_Experiment/dataset/warwick_CLS', model_path='/home/Pathology_Experiment/saved_models', draw_root='/home/Pathology_Experiment/runs/VPT_finetuning_with_timm', model_path_by_hand=None, paint=True, enable_tensorboard=False, enable_attention_check=False, enable_visualize_check=False, data_augmentation_mode=3, PromptTuning='Deep', Prompt_Token_num=20, PromptUnFreeze=False, Pre_Trained_model_path=None, num_classes=0, edge_size=224, batch_size=1, check_minibatch=None)\n","Epoch: Test\n","----------\n","Epoch: test test index of 4 minibatch: 1 time used: 2.232135534286499\n","minibatch AVG loss: 0.26816506078466773\n","Epoch: test test index of 4 minibatch: 2 time used: 0.05264568328857422\n","minibatch AVG loss: 0.05344134452752769\n","Epoch: test test index of 4 minibatch: 3 time used: 0.05270862579345703\n","minibatch AVG loss: 0.0027116276614833623\n","Epoch: test test index of 4 minibatch: 4 time used: 0.052893638610839844\n","minibatch AVG loss: 0.4669480860720796\n","Epoch: test test index of 4 minibatch: 5 time used: 0.2150881290435791\n","minibatch AVG loss: 0.6584409326897003\n","Epoch: test test index of 4 minibatch: 6 time used: 0.07256197929382324\n","minibatch AVG loss: 0.14450758622115245\n","Epoch: test test index of 4 minibatch: 7 time used: 0.06723260879516602\n","minibatch AVG loss: 0.20112267805961892\n","Epoch: test test index of 4 minibatch: 8 time used: 0.0691368579864502\n","minibatch AVG loss: 0.014065294861211441\n","Epoch: test test index of 4 minibatch: 9 time used: 0.06417989730834961\n","minibatch AVG loss: 0.01810028008185327\n","Epoch: test test index of 4 minibatch: 10 time used: 0.0646204948425293\n","minibatch AVG loss: 0.10914140564273112\n","Epoch: test test index of 4 minibatch: 11 time used: 0.052018165588378906\n","minibatch AVG loss: 0.025396305602043867\n","Epoch: test test index of 4 minibatch: 12 time used: 0.0520014762878418\n","minibatch AVG loss: 0.13099558325484395\n","Epoch: test test index of 4 minibatch: 13 time used: 0.05219769477844238\n","minibatch AVG loss: 0.010371054049755912\n","Epoch: test test index of 4 minibatch: 14 time used: 0.05182027816772461\n","minibatch AVG loss: 0.010890420817304403\n","Epoch: test test index of 4 minibatch: 15 time used: 0.05169200897216797\n","minibatch AVG loss: 0.005727577343350276\n","Epoch: test test index of 4 minibatch: 16 time used: 0.0532376766204834\n","minibatch AVG loss: 0.001175819052150473\n","Epoch: test test index of 4 minibatch: 17 time used: 0.05185651779174805\n","minibatch AVG loss: 0.015627381930244155\n","Epoch: test test index of 4 minibatch: 18 time used: 0.05151867866516113\n","minibatch AVG loss: 0.011185463983565569\n","Epoch: test test index of 4 minibatch: 19 time used: 0.05146026611328125\n","minibatch AVG loss: 0.0034086240502801957\n","Epoch: test test index of 4 minibatch: 20 time used: 0.05474138259887695\n","minibatch AVG loss: 0.11933412203507032\n","\n","Epoch: test \n","Loss: 0.1135 Acc: 95.0000\n","benign precision: 100.0000 recall: 89.1892\n","benign sensitivity: 89.1892 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 91.4894\n","benign TP: 33.0\n","benign TN: 43.0\n","benign FP: 0.0\n","benign FN: 4.0\n","malignant precision: 91.4894 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 89.1892\n","malignant FPR: 10.8108 NPV: 100.0000\n","malignant TP: 43.0\n","malignant TN: 33.0\n","malignant FP: 4.0\n","malignant FN: 0.0\n","\n","\n","Testing complete in 0m 4s\n"]}],"source":["!python Test.py --edge_size 224 
--data_augmentation_mode 3 --model_idx ViT_base_timm_PromptDeep_20_401_lf15_finetuning_warwick_CLS --PromptTuning Deep --dataroot /data/Pathology_Experiment/dataset/warwick_CLS --draw_root /home/Pathology_Experiment/runs/VPT_finetuning_with_timm --model_path /home/Pathology_Experiment/saved_models"]},{"cell_type":"markdown","metadata":{"id":"P5rRq1HhbSiC"},"source":["## Finetuning with PuzzleTuning Prompt\n","VPT + finetuning (with timm weights & PuzzleTuning Prompt)"]},{"cell_type":"markdown","metadata":{"id":"viVkMdz5bVQ_"},"source":["Train"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"background_save":true},"id":"UUYpNJ74bV4n"},"outputs":[{"name":"stdout","output_type":"stream","text":["class_names: ['benign', 'malignant']\n","*********************************setting*************************************\n","Namespace(model_idx='ViT_base_timm_PuzzleTuning_SAE_promptstate_PromptDeep_20_401_lf15_finetuning_warwick_CLS', drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, cls_token_off=False, pos_embedding_off=False, att_module='SimAM', backbone_PT_off=False, gpu_idx=-1, dataroot='/data/Pathology_Experiment/dataset/warwick_CLS', model_path='/home/Pathology_Experiment/saved_models', draw_root='/home/Pathology_Experiment/runs/SAE-timm-start_promptstate_VPT_finetuning_with_timm', paint=True, enable_tensorboard=True, enable_attention_check=False, enable_visualize_check=False, PromptTuning='Deep', Prompt_Token_num=20, PromptUnFreeze=True, linearprobing=False, Pre_Trained_model_path=None, Prompt_state_path='/home/Pathology_Experiment/saved_models/ViT_b16_224_timm_PuzzleTuning_SAE_CPIAm_Prompt_Deep_tokennum_20_promptstate.pth', enable_sam=False, augmentation_name=None, ratio_strategy=None, patch_strategy=None, loss_drive_threshold=4.0, fix_position_ratio=0.5, fix_patch_size=None, patch_size_jump=None, num_classes=0, edge_size=224, data_augmentation_mode=3, batch_size=8, num_epochs=50, intake_epochs=0, lr=1e-05, lrf=0.15, opt_name='Adam', check_minibatch=None, num_workers=2)\n","we don't have more GPU idx here, try to use gpu_idx=0\n","PromptTuning of ViT_base_timm_PuzzleTuning_SAE_promptstate_PromptDeep_20_401_lf15_finetuning_warwick_CLS\n","Prompt VPT type: Deep\n","backbone base_state_dict of timm\n","prompting with prompt_state at: /home/Pathology_Experiment/saved_models/ViT_b16_224_timm_PuzzleTuning_SAE_CPIAm_Prompt_Deep_tokennum_20_promptstate.pth\n","in prompt model building, timm ViT loaded for base_state_dict\n","head not match, so skip head\n","in prompt model building, a .pth prompt_state_dict loaded\n","model forward checked\n","model is ready now!\n","prompt tuning with all parameters unfrozen\n","GPU: 0\n","----------------------------------------------------------------\n"," Layer (type) Output Shape Param #\n","================================================================\n"," Conv2d-1 [-1, 768, 14, 14] 590,592\n"," Identity-2 [-1, 196, 768] 0\n"," PatchEmbed-3 [-1, 196, 768] 0\n"," Dropout-4 [-1, 197, 768] 0\n"," LayerNorm-5 [-1, 217, 768] 1,536\n"," Linear-6 [-1, 217, 2304] 1,771,776\n"," Dropout-7 [-1, 12, 217, 217] 0\n"," Linear-8 [-1, 217, 768] 590,592\n"," Dropout-9 [-1, 217, 768] 0\n"," Attention-10 [-1, 217, 768] 0\n"," Identity-11 [-1, 217, 768] 0\n"," LayerNorm-12 [-1, 217, 768] 1,536\n"," Linear-13 [-1, 217, 3072] 2,362,368\n"," GELU-14 [-1, 217, 3072] 0\n"," Dropout-15 [-1, 217, 3072] 0\n"," Linear-16 [-1, 217, 768] 2,360,064\n"," Dropout-17 [-1, 217, 768] 0\n"," Mlp-18 [-1, 217, 768] 0\n"," Identity-19 [-1, 217, 768] 0\n"," Block-20 
[-1, 217, 768] 0\n"," LayerNorm-21 [-1, 217, 768] 1,536\n"," Linear-22 [-1, 217, 2304] 1,771,776\n"," Dropout-23 [-1, 12, 217, 217] 0\n"," Linear-24 [-1, 217, 768] 590,592\n"," Dropout-25 [-1, 217, 768] 0\n"," Attention-26 [-1, 217, 768] 0\n"," Identity-27 [-1, 217, 768] 0\n"," LayerNorm-28 [-1, 217, 768] 1,536\n"," Linear-29 [-1, 217, 3072] 2,362,368\n"," GELU-30 [-1, 217, 3072] 0\n"," Dropout-31 [-1, 217, 3072] 0\n"," Linear-32 [-1, 217, 768] 2,360,064\n"," Dropout-33 [-1, 217, 768] 0\n"," Mlp-34 [-1, 217, 768] 0\n"," Identity-35 [-1, 217, 768] 0\n"," Block-36 [-1, 217, 768] 0\n"," LayerNorm-37 [-1, 217, 768] 1,536\n"," Linear-38 [-1, 217, 2304] 1,771,776\n"," Dropout-39 [-1, 12, 217, 217] 0\n"," Linear-40 [-1, 217, 768] 590,592\n"," Dropout-41 [-1, 217, 768] 0\n"," Attention-42 [-1, 217, 768] 0\n"," Identity-43 [-1, 217, 768] 0\n"," LayerNorm-44 [-1, 217, 768] 1,536\n"," Linear-45 [-1, 217, 3072] 2,362,368\n"," GELU-46 [-1, 217, 3072] 0\n"," Dropout-47 [-1, 217, 3072] 0\n"," Linear-48 [-1, 217, 768] 2,360,064\n"," Dropout-49 [-1, 217, 768] 0\n"," Mlp-50 [-1, 217, 768] 0\n"," Identity-51 [-1, 217, 768] 0\n"," Block-52 [-1, 217, 768] 0\n"," LayerNorm-53 [-1, 217, 768] 1,536\n"," Linear-54 [-1, 217, 2304] 1,771,776\n"," Dropout-55 [-1, 12, 217, 217] 0\n"," Linear-56 [-1, 217, 768] 590,592\n"," Dropout-57 [-1, 217, 768] 0\n"," Attention-58 [-1, 217, 768] 0\n"," Identity-59 [-1, 217, 768] 0\n"," LayerNorm-60 [-1, 217, 768] 1,536\n"," Linear-61 [-1, 217, 3072] 2,362,368\n"," GELU-62 [-1, 217, 3072] 0\n"," Dropout-63 [-1, 217, 3072] 0\n"," Linear-64 [-1, 217, 768] 2,360,064\n"," Dropout-65 [-1, 217, 768] 0\n"," Mlp-66 [-1, 217, 768] 0\n"," Identity-67 [-1, 217, 768] 0\n"," Block-68 [-1, 217, 768] 0\n"," LayerNorm-69 [-1, 217, 768] 1,536\n"," Linear-70 [-1, 217, 2304] 1,771,776\n"," Dropout-71 [-1, 12, 217, 217] 0\n"," Linear-72 [-1, 217, 768] 590,592\n"," Dropout-73 [-1, 217, 768] 0\n"," Attention-74 [-1, 217, 768] 0\n"," Identity-75 [-1, 217, 768] 0\n"," LayerNorm-76 [-1, 217, 768] 1,536\n"," Linear-77 [-1, 217, 3072] 2,362,368\n"," GELU-78 [-1, 217, 3072] 0\n"," Dropout-79 [-1, 217, 3072] 0\n"," Linear-80 [-1, 217, 768] 2,360,064\n"," Dropout-81 [-1, 217, 768] 0\n"," Mlp-82 [-1, 217, 768] 0\n"," Identity-83 [-1, 217, 768] 0\n"," Block-84 [-1, 217, 768] 0\n"," LayerNorm-85 [-1, 217, 768] 1,536\n"," Linear-86 [-1, 217, 2304] 1,771,776\n"," Dropout-87 [-1, 12, 217, 217] 0\n"," Linear-88 [-1, 217, 768] 590,592\n"," Dropout-89 [-1, 217, 768] 0\n"," Attention-90 [-1, 217, 768] 0\n"," Identity-91 [-1, 217, 768] 0\n"," LayerNorm-92 [-1, 217, 768] 1,536\n"," Linear-93 [-1, 217, 3072] 2,362,368\n"," GELU-94 [-1, 217, 3072] 0\n"," Dropout-95 [-1, 217, 3072] 0\n"," Linear-96 [-1, 217, 768] 2,360,064\n"," Dropout-97 [-1, 217, 768] 0\n"," Mlp-98 [-1, 217, 768] 0\n"," Identity-99 [-1, 217, 768] 0\n"," Block-100 [-1, 217, 768] 0\n"," LayerNorm-101 [-1, 217, 768] 1,536\n"," Linear-102 [-1, 217, 2304] 1,771,776\n"," Dropout-103 [-1, 12, 217, 217] 0\n"," Linear-104 [-1, 217, 768] 590,592\n"," Dropout-105 [-1, 217, 768] 0\n"," Attention-106 [-1, 217, 768] 0\n"," Identity-107 [-1, 217, 768] 0\n"," LayerNorm-108 [-1, 217, 768] 1,536\n"," Linear-109 [-1, 217, 3072] 2,362,368\n"," GELU-110 [-1, 217, 3072] 0\n"," Dropout-111 [-1, 217, 3072] 0\n"," Linear-112 [-1, 217, 768] 2,360,064\n"," Dropout-113 [-1, 217, 768] 0\n"," Mlp-114 [-1, 217, 768] 0\n"," Identity-115 [-1, 217, 768] 0\n"," Block-116 [-1, 217, 768] 0\n"," LayerNorm-117 [-1, 217, 768] 1,536\n"," Linear-118 [-1, 217, 2304] 1,771,776\n"," Dropout-119 [-1, 
12, 217, 217] 0\n"," Linear-120 [-1, 217, 768] 590,592\n"," Dropout-121 [-1, 217, 768] 0\n"," Attention-122 [-1, 217, 768] 0\n"," Identity-123 [-1, 217, 768] 0\n"," LayerNorm-124 [-1, 217, 768] 1,536\n"," Linear-125 [-1, 217, 3072] 2,362,368\n"," GELU-126 [-1, 217, 3072] 0\n"," Dropout-127 [-1, 217, 3072] 0\n"," Linear-128 [-1, 217, 768] 2,360,064\n"," Dropout-129 [-1, 217, 768] 0\n"," Mlp-130 [-1, 217, 768] 0\n"," Identity-131 [-1, 217, 768] 0\n"," Block-132 [-1, 217, 768] 0\n"," LayerNorm-133 [-1, 217, 768] 1,536\n"," Linear-134 [-1, 217, 2304] 1,771,776\n"," Dropout-135 [-1, 12, 217, 217] 0\n"," Linear-136 [-1, 217, 768] 590,592\n"," Dropout-137 [-1, 217, 768] 0\n"," Attention-138 [-1, 217, 768] 0\n"," Identity-139 [-1, 217, 768] 0\n"," LayerNorm-140 [-1, 217, 768] 1,536\n"," Linear-141 [-1, 217, 3072] 2,362,368\n"," GELU-142 [-1, 217, 3072] 0\n"," Dropout-143 [-1, 217, 3072] 0\n"," Linear-144 [-1, 217, 768] 2,360,064\n"," Dropout-145 [-1, 217, 768] 0\n"," Mlp-146 [-1, 217, 768] 0\n"," Identity-147 [-1, 217, 768] 0\n"," Block-148 [-1, 217, 768] 0\n"," LayerNorm-149 [-1, 217, 768] 1,536\n"," Linear-150 [-1, 217, 2304] 1,771,776\n"," Dropout-151 [-1, 12, 217, 217] 0\n"," Linear-152 [-1, 217, 768] 590,592\n"," Dropout-153 [-1, 217, 768] 0\n"," Attention-154 [-1, 217, 768] 0\n"," Identity-155 [-1, 217, 768] 0\n"," LayerNorm-156 [-1, 217, 768] 1,536\n"," Linear-157 [-1, 217, 3072] 2,362,368\n"," GELU-158 [-1, 217, 3072] 0\n"," Dropout-159 [-1, 217, 3072] 0\n"," Linear-160 [-1, 217, 768] 2,360,064\n"," Dropout-161 [-1, 217, 768] 0\n"," Mlp-162 [-1, 217, 768] 0\n"," Identity-163 [-1, 217, 768] 0\n"," Block-164 [-1, 217, 768] 0\n"," LayerNorm-165 [-1, 217, 768] 1,536\n"," Linear-166 [-1, 217, 2304] 1,771,776\n"," Dropout-167 [-1, 12, 217, 217] 0\n"," Linear-168 [-1, 217, 768] 590,592\n"," Dropout-169 [-1, 217, 768] 0\n"," Attention-170 [-1, 217, 768] 0\n"," Identity-171 [-1, 217, 768] 0\n"," LayerNorm-172 [-1, 217, 768] 1,536\n"," Linear-173 [-1, 217, 3072] 2,362,368\n"," GELU-174 [-1, 217, 3072] 0\n"," Dropout-175 [-1, 217, 3072] 0\n"," Linear-176 [-1, 217, 768] 2,360,064\n"," Dropout-177 [-1, 217, 768] 0\n"," Mlp-178 [-1, 217, 768] 0\n"," Identity-179 [-1, 217, 768] 0\n"," Block-180 [-1, 217, 768] 0\n"," LayerNorm-181 [-1, 217, 768] 1,536\n"," Linear-182 [-1, 217, 2304] 1,771,776\n"," Dropout-183 [-1, 12, 217, 217] 0\n"," Linear-184 [-1, 217, 768] 590,592\n"," Dropout-185 [-1, 217, 768] 0\n"," Attention-186 [-1, 217, 768] 0\n"," Identity-187 [-1, 217, 768] 0\n"," LayerNorm-188 [-1, 217, 768] 1,536\n"," Linear-189 [-1, 217, 3072] 2,362,368\n"," GELU-190 [-1, 217, 3072] 0\n"," Dropout-191 [-1, 217, 3072] 0\n"," Linear-192 [-1, 217, 768] 2,360,064\n"," Dropout-193 [-1, 217, 768] 0\n"," Mlp-194 [-1, 217, 768] 0\n"," Identity-195 [-1, 217, 768] 0\n"," Block-196 [-1, 217, 768] 0\n"," LayerNorm-197 [-1, 197, 768] 1,536\n"," Identity-198 [-1, 768] 0\n"," Linear-199 [-1, 2] 1,538\n","================================================================\n","Total params: 85,648,130\n","Trainable params: 85,648,130\n","Non-trainable params: 0\n","----------------------------------------------------------------\n","Input size (MB): 0.57\n","Forward/backward pass size (MB): 454.20\n","Params size (MB): 326.72\n","Estimated Total Size (MB): 781.49\n","----------------------------------------------------------------\n","model : ViT_base_timm_PuzzleTuning_SAE_promptstate_PromptDeep_20_401_lf15_finetuning_warwick_CLS\n","no valid counterparts augmentation selected\n","Epoch 1/50\n","----------\n","\n","Epoch: 1 
train \n","Loss: 0.9057 Acc: 47.8261\n","benign precision: 47.5000 recall: 65.5172\n","benign sensitivity: 65.5172 specificity: 40.0000\n","benign FPR: 60.0000 NPV: 58.3333\n","benign TP: 19.0\n","benign TN: 14.0\n","benign FP: 21.0\n","benign FN: 10.0\n","malignant precision: 58.3333 recall: 40.0000\n","malignant sensitivity: 40.0000 specificity: 65.5172\n","malignant FPR: 34.4828 NPV: 47.5000\n","malignant TP: 14.0\n","malignant TN: 19.0\n","malignant FP: 10.0\n","malignant FN: 21.0\n","\n","\n","\n","Epoch: 1 val \n","Loss: 0.6906 Acc: 56.2500\n","benign precision: 0.0000 recall: 0.0000\n","benign sensitivity: 0.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 56.2500\n","benign TP: 0.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 7.0\n","malignant precision: 56.2500 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 0.0000\n","malignant FPR: 100.0000 NPV: 0.0000\n","malignant TP: 9.0\n","malignant TN: 0.0\n","malignant FP: 7.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 2/50\n","----------\n","\n","Epoch: 2 train \n","Loss: 0.7041 Acc: 55.0725\n","benign precision: 0.0000 recall: 0.0000\n","benign sensitivity: 0.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 59.3750\n","benign TP: 0.0\n","benign TN: 38.0\n","benign FP: 0.0\n","benign FN: 26.0\n","malignant precision: 59.3750 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 0.0000\n","malignant FPR: 100.0000 NPV: 0.0000\n","malignant TP: 38.0\n","malignant TN: 0.0\n","malignant FP: 26.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 2 val \n","Loss: 0.6821 Acc: 56.2500\n","benign precision: 0.0000 recall: 0.0000\n","benign sensitivity: 0.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 56.2500\n","benign TP: 0.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 7.0\n","malignant precision: 56.2500 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 0.0000\n","malignant FPR: 100.0000 NPV: 0.0000\n","malignant TP: 9.0\n","malignant TN: 0.0\n","malignant FP: 7.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 3/50\n","----------\n","\n","Epoch: 3 train \n","Loss: 0.6266 Acc: 55.0725\n","benign precision: 0.0000 recall: 0.0000\n","benign sensitivity: 0.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 59.3750\n","benign TP: 0.0\n","benign TN: 38.0\n","benign FP: 0.0\n","benign FN: 26.0\n","malignant precision: 59.3750 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 0.0000\n","malignant FPR: 100.0000 NPV: 0.0000\n","malignant TP: 38.0\n","malignant TN: 0.0\n","malignant FP: 26.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 3 val \n","Loss: 0.6837 Acc: 56.2500\n","benign precision: 0.0000 recall: 0.0000\n","benign sensitivity: 0.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 56.2500\n","benign TP: 0.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 7.0\n","malignant precision: 56.2500 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 0.0000\n","malignant FPR: 100.0000 NPV: 0.0000\n","malignant TP: 9.0\n","malignant TN: 0.0\n","malignant FP: 7.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 4/50\n","----------\n","\n","Epoch: 4 train \n","Loss: 0.6440 Acc: 50.7246\n","benign precision: 0.0000 recall: 0.0000\n","benign sensitivity: 0.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 54.6875\n","benign TP: 0.0\n","benign TN: 35.0\n","benign FP: 0.0\n","benign FN: 29.0\n","malignant precision: 54.6875 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 0.0000\n","malignant FPR: 100.0000 NPV: 
0.0000\n","malignant TP: 35.0\n","malignant TN: 0.0\n","malignant FP: 29.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 4 val \n","Loss: 0.6855 Acc: 56.2500\n","benign precision: 0.0000 recall: 0.0000\n","benign sensitivity: 0.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 56.2500\n","benign TP: 0.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 7.0\n","malignant precision: 56.2500 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 0.0000\n","malignant FPR: 100.0000 NPV: 0.0000\n","malignant TP: 9.0\n","malignant TN: 0.0\n","malignant FP: 7.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 5/50\n","----------\n","\n","Epoch: 5 train \n","Loss: 0.6487 Acc: 44.9275\n","benign precision: 36.3636 recall: 29.6296\n","benign sensitivity: 29.6296 specificity: 62.1622\n","benign FPR: 37.8378 NPV: 54.7619\n","benign TP: 8.0\n","benign TN: 23.0\n","benign FP: 14.0\n","benign FN: 19.0\n","malignant precision: 54.7619 recall: 62.1622\n","malignant sensitivity: 62.1622 specificity: 29.6296\n","malignant FPR: 70.3704 NPV: 36.3636\n","malignant TP: 23.0\n","malignant TN: 8.0\n","malignant FP: 19.0\n","malignant FN: 14.0\n","\n","\n","\n","Epoch: 5 val \n","Loss: 0.7125 Acc: 56.2500\n","benign precision: 0.0000 recall: 0.0000\n","benign sensitivity: 0.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 56.2500\n","benign TP: 0.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 7.0\n","malignant precision: 56.2500 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 0.0000\n","malignant FPR: 100.0000 NPV: 0.0000\n","malignant TP: 9.0\n","malignant TN: 0.0\n","malignant FP: 7.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 6/50\n","----------\n","\n","Epoch: 6 train \n","Loss: 0.6396 Acc: 55.0725\n","benign precision: 0.0000 recall: 0.0000\n","benign sensitivity: 0.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 59.3750\n","benign TP: 0.0\n","benign TN: 38.0\n","benign FP: 0.0\n","benign FN: 26.0\n","malignant precision: 59.3750 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 0.0000\n","malignant FPR: 100.0000 NPV: 0.0000\n","malignant TP: 38.0\n","malignant TN: 0.0\n","malignant FP: 26.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 6 val \n","Loss: 0.6673 Acc: 56.2500\n","benign precision: 0.0000 recall: 0.0000\n","benign sensitivity: 0.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 56.2500\n","benign TP: 0.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 7.0\n","malignant precision: 56.2500 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 0.0000\n","malignant FPR: 100.0000 NPV: 0.0000\n","malignant TP: 9.0\n","malignant TN: 0.0\n","malignant FP: 7.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 7/50\n","----------\n","\n","Epoch: 7 train \n","Loss: 0.6158 Acc: 49.2754\n","benign precision: 40.0000 recall: 22.2222\n","benign sensitivity: 22.2222 specificity: 75.6757\n","benign FPR: 24.3243 NPV: 57.1429\n","benign TP: 6.0\n","benign TN: 28.0\n","benign FP: 9.0\n","benign FN: 21.0\n","malignant precision: 57.1429 recall: 75.6757\n","malignant sensitivity: 75.6757 specificity: 22.2222\n","malignant FPR: 77.7778 NPV: 40.0000\n","malignant TP: 28.0\n","malignant TN: 6.0\n","malignant FP: 21.0\n","malignant FN: 9.0\n","\n","\n","\n","Epoch: 7 val \n","Loss: 0.6687 Acc: 75.0000\n","benign precision: 66.6667 recall: 85.7143\n","benign sensitivity: 85.7143 specificity: 66.6667\n","benign FPR: 33.3333 NPV: 85.7143\n","benign TP: 6.0\n","benign TN: 6.0\n","benign FP: 3.0\n","benign FN: 1.0\n","malignant 
precision: 85.7143 recall: 66.6667\n","malignant sensitivity: 66.6667 specificity: 85.7143\n","malignant FPR: 14.2857 NPV: 66.6667\n","malignant TP: 6.0\n","malignant TN: 6.0\n","malignant FP: 1.0\n","malignant FN: 3.0\n","\n","\n","\n","Epoch 8/50\n","----------\n","\n","Epoch: 8 train \n","Loss: 0.5457 Acc: 69.5652\n","benign precision: 81.8182 recall: 60.0000\n","benign sensitivity: 60.0000 specificity: 88.2353\n","benign FPR: 11.7647 NPV: 71.4286\n","benign TP: 18.0\n","benign TN: 30.0\n","benign FP: 4.0\n","benign FN: 12.0\n","malignant precision: 71.4286 recall: 88.2353\n","malignant sensitivity: 88.2353 specificity: 60.0000\n","malignant FPR: 40.0000 NPV: 81.8182\n","malignant TP: 30.0\n","malignant TN: 18.0\n","malignant FP: 12.0\n","malignant FN: 4.0\n","\n","\n","\n","Epoch: 8 val \n","Loss: 0.5382 Acc: 81.2500\n","benign precision: 70.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 66.6667\n","benign FPR: 33.3333 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 6.0\n","benign FP: 3.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 66.6667\n","malignant sensitivity: 66.6667 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 70.0000\n","malignant TP: 6.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 3.0\n","\n","\n","\n","Epoch 9/50\n","----------\n","\n","Epoch: 9 train \n","Loss: 0.5411 Acc: 59.4203\n","benign precision: 55.5556 recall: 74.0741\n","benign sensitivity: 74.0741 specificity: 56.7568\n","benign FPR: 43.2432 NPV: 75.0000\n","benign TP: 20.0\n","benign TN: 21.0\n","benign FP: 16.0\n","benign FN: 7.0\n","malignant precision: 75.0000 recall: 56.7568\n","malignant sensitivity: 56.7568 specificity: 74.0741\n","malignant FPR: 25.9259 NPV: 55.5556\n","malignant TP: 21.0\n","malignant TN: 20.0\n","malignant FP: 7.0\n","malignant FN: 16.0\n","\n","\n","\n","Epoch: 9 val \n","Loss: 0.7515 Acc: 37.5000\n","benign precision: 38.4615 recall: 71.4286\n","benign sensitivity: 71.4286 specificity: 11.1111\n","benign FPR: 88.8889 NPV: 33.3333\n","benign TP: 5.0\n","benign TN: 1.0\n","benign FP: 8.0\n","benign FN: 2.0\n","malignant precision: 33.3333 recall: 11.1111\n","malignant sensitivity: 11.1111 specificity: 71.4286\n","malignant FPR: 28.5714 NPV: 38.4615\n","malignant TP: 1.0\n","malignant TN: 5.0\n","malignant FP: 2.0\n","malignant FN: 8.0\n","\n","\n","\n","Epoch 10/50\n","----------\n","\n","Epoch: 10 train \n","Loss: 0.5343 Acc: 59.4203\n","benign precision: 56.0976 recall: 82.1429\n","benign sensitivity: 82.1429 specificity: 50.0000\n","benign FPR: 50.0000 NPV: 78.2609\n","benign TP: 23.0\n","benign TN: 18.0\n","benign FP: 18.0\n","benign FN: 5.0\n","malignant precision: 78.2609 recall: 50.0000\n","malignant sensitivity: 50.0000 specificity: 82.1429\n","malignant FPR: 17.8571 NPV: 56.0976\n","malignant TP: 18.0\n","malignant TN: 23.0\n","malignant FP: 5.0\n","malignant FN: 18.0\n","\n","\n","\n","Epoch: 10 val \n","Loss: 0.6291 Acc: 56.2500\n","benign precision: 0.0000 recall: 0.0000\n","benign sensitivity: 0.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 56.2500\n","benign TP: 0.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 7.0\n","malignant precision: 56.2500 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 0.0000\n","malignant FPR: 100.0000 NPV: 0.0000\n","malignant TP: 9.0\n","malignant TN: 0.0\n","malignant FP: 7.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 11/50\n","----------\n","\n","Epoch: 11 train \n","Loss: 0.3603 Acc: 68.1159\n","benign precision: 91.6667 recall: 
40.7407\n","benign sensitivity: 40.7407 specificity: 97.2973\n","benign FPR: 2.7027 NPV: 69.2308\n","benign TP: 11.0\n","benign TN: 36.0\n","benign FP: 1.0\n","benign FN: 16.0\n","malignant precision: 69.2308 recall: 97.2973\n","malignant sensitivity: 97.2973 specificity: 40.7407\n","malignant FPR: 59.2593 NPV: 91.6667\n","malignant TP: 36.0\n","malignant TN: 11.0\n","malignant FP: 16.0\n","malignant FN: 1.0\n","\n","\n","\n","Epoch: 11 val \n","Loss: 0.1934 Acc: 93.7500\n","benign precision: 100.0000 recall: 85.7143\n","benign sensitivity: 85.7143 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 90.0000\n","benign TP: 6.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 1.0\n","malignant precision: 90.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 85.7143\n","malignant FPR: 14.2857 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 6.0\n","malignant FP: 1.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 12/50\n","----------\n","\n","Epoch: 12 train \n","Loss: 0.1909 Acc: 85.5072\n","benign precision: 90.0000 recall: 93.1034\n","benign sensitivity: 93.1034 specificity: 91.4286\n","benign FPR: 8.5714 NPV: 94.1176\n","benign TP: 27.0\n","benign TN: 32.0\n","benign FP: 3.0\n","benign FN: 2.0\n","malignant precision: 94.1176 recall: 91.4286\n","malignant sensitivity: 91.4286 specificity: 93.1034\n","malignant FPR: 6.8966 NPV: 90.0000\n","malignant TP: 32.0\n","malignant TN: 27.0\n","malignant FP: 2.0\n","malignant FN: 3.0\n","\n","\n","\n","Epoch: 12 val \n","Loss: 0.1050 Acc: 93.7500\n","benign precision: 100.0000 recall: 85.7143\n","benign sensitivity: 85.7143 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 90.0000\n","benign TP: 6.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 1.0\n","malignant precision: 90.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 85.7143\n","malignant FPR: 14.2857 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 6.0\n","malignant FP: 1.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 13/50\n","----------\n","\n","Epoch: 13 train \n","Loss: 0.0564 Acc: 89.8551\n","benign precision: 100.0000 recall: 92.5926\n","benign sensitivity: 92.5926 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 94.8718\n","benign TP: 25.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 2.0\n","malignant precision: 94.8718 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 92.5926\n","malignant FPR: 7.4074 NPV: 100.0000\n","malignant TP: 37.0\n","malignant TN: 25.0\n","malignant FP: 2.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 13 val \n","Loss: 0.1332 Acc: 93.7500\n","benign precision: 87.5000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 88.8889\n","benign FPR: 11.1111 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 8.0\n","benign FP: 1.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 88.8889\n","malignant sensitivity: 88.8889 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 87.5000\n","malignant TP: 8.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 1.0\n","\n","\n","\n","Epoch 14/50\n","----------\n","\n","Epoch: 14 train \n","Loss: 0.0390 Acc: 91.3043\n","benign precision: 96.4286 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 97.2973\n","benign FPR: 2.7027 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 36.0\n","benign FP: 1.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 97.2973\n","malignant sensitivity: 97.2973 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 96.4286\n","malignant TP: 36.0\n","malignant 
TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 1.0\n","\n","\n","\n","Epoch: 14 val \n","Loss: 0.0150 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 15/50\n","----------\n","\n","Epoch: 15 train \n","Loss: 0.0162 Acc: 91.3043\n","benign precision: 100.0000 recall: 96.1538\n","benign sensitivity: 96.1538 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 97.4359\n","benign TP: 25.0\n","benign TN: 38.0\n","benign FP: 0.0\n","benign FN: 1.0\n","malignant precision: 97.4359 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 96.1538\n","malignant FPR: 3.8462 NPV: 100.0000\n","malignant TP: 38.0\n","malignant TN: 25.0\n","malignant FP: 1.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 15 val \n","Loss: 0.0762 Acc: 93.7500\n","benign precision: 87.5000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 88.8889\n","benign FPR: 11.1111 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 8.0\n","benign FP: 1.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 88.8889\n","malignant sensitivity: 88.8889 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 87.5000\n","malignant TP: 8.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 1.0\n","\n","\n","\n","Epoch 16/50\n","----------\n","\n","Epoch: 16 train \n","Loss: 0.0214 Acc: 91.3043\n","benign precision: 96.5517 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 97.2222\n","benign FPR: 2.7778 NPV: 100.0000\n","benign TP: 28.0\n","benign TN: 35.0\n","benign FP: 1.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 97.2222\n","malignant sensitivity: 97.2222 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 96.5517\n","malignant TP: 35.0\n","malignant TN: 28.0\n","malignant FP: 0.0\n","malignant FN: 1.0\n","\n","\n","\n","Epoch: 16 val \n","Loss: 0.0315 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 17/50\n","----------\n","\n","Epoch: 17 train \n","Loss: 0.0052 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 37.0\n","malignant TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 17 val \n","Loss: 0.1970 Acc: 93.7500\n","benign precision: 100.0000 recall: 85.7143\n","benign sensitivity: 85.7143 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 90.0000\n","benign TP: 6.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 
1.0\n","malignant precision: 90.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 85.7143\n","malignant FPR: 14.2857 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 6.0\n","malignant FP: 1.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 18/50\n","----------\n","\n","Epoch: 18 train \n","Loss: 0.0092 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 28.0\n","benign TN: 36.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 36.0\n","malignant TN: 28.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 18 val \n","Loss: 0.3976 Acc: 87.5000\n","benign precision: 77.7778 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 77.7778\n","benign FPR: 22.2222 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 7.0\n","benign FP: 2.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 77.7778\n","malignant sensitivity: 77.7778 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 77.7778\n","malignant TP: 7.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 2.0\n","\n","\n","\n","Epoch 19/50\n","----------\n","\n","Epoch: 19 train \n","Loss: 0.0221 Acc: 91.3043\n","benign precision: 96.4286 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 97.2973\n","benign FPR: 2.7027 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 36.0\n","benign FP: 1.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 97.2973\n","malignant sensitivity: 97.2973 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 96.4286\n","malignant TP: 36.0\n","malignant TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 1.0\n","\n","\n","\n","Epoch: 19 val \n","Loss: 0.0045 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 20/50\n","----------\n","\n","Epoch: 20 train \n","Loss: 0.0001 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 37.0\n","malignant TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 20 val \n","Loss: 0.0233 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 21/50\n","----------\n","\n","Epoch: 21 train \n","Loss: 0.0004 Acc: 
92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 37.0\n","malignant TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 21 val \n","Loss: 0.0371 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 22/50\n","----------\n","\n","Epoch: 22 train \n","Loss: 0.0001 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 28.0\n","benign TN: 36.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 36.0\n","malignant TN: 28.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 22 val \n","Loss: 0.0309 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 23/50\n","----------\n","\n","Epoch: 23 train \n","Loss: 0.0001 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 29.0\n","benign TN: 35.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 35.0\n","malignant TN: 29.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 23 val \n","Loss: 0.0265 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 24/50\n","----------\n","\n","Epoch: 24 train \n","Loss: 0.0000 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 28.0\n","benign TN: 36.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 
specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 36.0\n","malignant TN: 28.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 24 val \n","Loss: 0.0246 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 25/50\n","----------\n","\n","Epoch: 25 train \n","Loss: 0.0001 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 28.0\n","benign TN: 36.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 36.0\n","malignant TN: 28.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 25 val \n","Loss: 0.0212 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 26/50\n","----------\n","\n","Epoch: 26 train \n","Loss: 0.0000 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 30.0\n","benign TN: 34.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 34.0\n","malignant TN: 30.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 26 val \n","Loss: 0.0198 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 27/50\n","----------\n","\n","Epoch: 27 train \n","Loss: 0.0000 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 26.0\n","benign TN: 38.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 38.0\n","malignant TN: 26.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 27 val \n","Loss: 0.0195 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 
100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 28/50\n","----------\n","\n","Epoch: 28 train \n","Loss: 0.0001 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 37.0\n","malignant TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 28 val \n","Loss: 0.0182 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 29/50\n","----------\n","\n","Epoch: 29 train \n","Loss: 0.0001 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 28.0\n","benign TN: 36.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 36.0\n","malignant TN: 28.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 29 val \n","Loss: 0.0163 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 30/50\n","----------\n","\n","Epoch: 30 train \n","Loss: 0.0000 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 26.0\n","benign TN: 38.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 38.0\n","malignant TN: 26.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 30 val \n","Loss: 0.0156 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 
7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 31/50\n","----------\n","\n","Epoch: 31 train \n","Loss: 0.0000 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 26.0\n","benign TN: 38.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 38.0\n","malignant TN: 26.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 31 val \n","Loss: 0.0153 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 32/50\n","----------\n","\n","Epoch: 32 train \n","Loss: 0.0001 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 29.0\n","benign TN: 35.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 35.0\n","malignant TN: 29.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 32 val \n","Loss: 0.0150 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 33/50\n","----------\n","\n","Epoch: 33 train \n","Loss: 0.0001 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 28.0\n","benign TN: 36.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 36.0\n","malignant TN: 28.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 33 val \n","Loss: 0.0145 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 34/50\n","----------\n","\n","Epoch: 34 train \n","Loss: 0.0001 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 29.0\n","benign 
TN: 35.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 35.0\n","malignant TN: 29.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 34 val \n","Loss: 0.0141 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 35/50\n","----------\n","\n","Epoch: 35 train \n","Loss: 0.0000 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 26.0\n","benign TN: 38.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 38.0\n","malignant TN: 26.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 35 val \n","Loss: 0.0140 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 36/50\n","----------\n","\n","Epoch: 36 train \n","Loss: 0.0001 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 29.0\n","benign TN: 35.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 35.0\n","malignant TN: 29.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 36 val \n","Loss: 0.0133 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 37/50\n","----------\n","\n","Epoch: 37 train \n","Loss: 0.0001 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 28.0\n","benign TN: 36.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 36.0\n","malignant TN: 28.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 37 val 
\n","Loss: 0.0127 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 38/50\n","----------\n","\n","Epoch: 38 train \n","Loss: 0.0001 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 28.0\n","benign TN: 36.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 36.0\n","malignant TN: 28.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 38 val \n","Loss: 0.0123 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 39/50\n","----------\n","\n","Epoch: 39 train \n","Loss: 0.0000 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 28.0\n","benign TN: 36.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 36.0\n","malignant TN: 28.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 39 val \n","Loss: 0.0121 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 40/50\n","----------\n","\n","Epoch: 40 train \n","Loss: 0.0001 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 28.0\n","benign TN: 36.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 36.0\n","malignant TN: 28.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 40 val \n","Loss: 0.0119 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant 
sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 41/50\n","----------\n","\n","Epoch: 41 train \n","Loss: 0.0000 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 37.0\n","malignant TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 41 val \n","Loss: 0.0119 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 42/50\n","----------\n","\n","Epoch: 42 train \n","Loss: 0.0001 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 29.0\n","benign TN: 35.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 35.0\n","malignant TN: 29.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 42 val \n","Loss: 0.0117 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 43/50\n","----------\n","\n","Epoch: 43 train \n","Loss: 0.0000 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 37.0\n","malignant TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 43 val \n","Loss: 0.0117 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 44/50\n","----------\n","\n","Epoch: 44 train \n","Loss: 0.0000 Acc: 92.7536\n","benign precision: 100.0000 recall: 
100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 37.0\n","malignant TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 44 val \n","Loss: 0.0117 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 45/50\n","----------\n","\n","Epoch: 45 train \n","Loss: 0.0000 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 37.0\n","malignant TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 45 val \n","Loss: 0.0118 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 46/50\n","----------\n","\n","Epoch: 46 train \n","Loss: 0.0000 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 28.0\n","benign TN: 36.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 36.0\n","malignant TN: 28.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 46 val \n","Loss: 0.0119 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 47/50\n","----------\n","\n","Epoch: 47 train \n","Loss: 0.0001 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 
100.0000\n","malignant TP: 37.0\n","malignant TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 47 val \n","Loss: 0.0119 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 48/50\n","----------\n","\n","Epoch: 48 train \n","Loss: 0.0000 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 26.0\n","benign TN: 38.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 38.0\n","malignant TN: 26.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 48 val \n","Loss: 0.0120 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 49/50\n","----------\n","\n","Epoch: 49 train \n","Loss: 0.0002 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 28.0\n","benign TN: 36.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 36.0\n","malignant TN: 28.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 49 val \n","Loss: 0.0111 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch 50/50\n","----------\n","\n","Epoch: 50 train \n","Loss: 0.0000 Acc: 92.7536\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 27.0\n","benign TN: 37.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 37.0\n","malignant TN: 27.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Epoch: 50 val \n","Loss: 0.0108 Acc: 100.0000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","benign TP: 
7.0\n","benign TN: 9.0\n","benign FP: 0.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","malignant TP: 9.0\n","malignant TN: 7.0\n","malignant FP: 0.0\n","malignant FN: 0.0\n","\n","\n","\n","Training complete in 0m 56s\n","Best epoch idx: 50\n","Best epoch train Acc: 92.753623\n","Best epoch val Acc: 100.000000\n","benign precision: 100.0000 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 100.0000\n","benign FPR: 0.0000 NPV: 100.0000\n","malignant precision: 100.0000 recall: 100.0000\n","malignant sensitivity: 100.0000 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 100.0000\n","model trained by GPU (idx:0) has been saved at /home/Pathology_Experiment/saved_models/CLS_ViT_base_timm_PuzzleTuning_SAE_promptstate_PromptDeep_20_401_lf15_finetuning_warwick_CLS.pth\n"]}],"source":["!python Train.py --edge_size 224 --data_augmentation_mode 3 --lr 1e-05 --lrf 0.15 --enable_tensorboard --model_idx ViT_base_timm_PuzzleTuning_SAE_promptstate_PromptDeep_20_401_lf15_finetuning_warwick_CLS --PromptTuning Deep --Prompt_Token_num 20 --PromptUnFreeze --dataroot /data/Pathology_Experiment/dataset/warwick_CLS --draw_root /home/Pathology_Experiment/runs/SAE-timm-start_promptstate_VPT_finetuning_with_timm --Prompt_state_path /home/Pathology_Experiment/saved_models/ViT_b16_224_timm_PuzzleTuning_SAE_CPIAm_Prompt_Deep_tokennum_20_promptstate.pth --model_path /home/Pathology_Experiment/saved_models"]},{"cell_type":"markdown","metadata":{"id":"tjdLl1IgbWJA"},"source":["Test"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"background_save":true},"id":"JWODC-3dbWy5"},"outputs":[{"name":"stdout","output_type":"stream","text":["class_names: ['benign', 'malignant']\n","base_state_dict of timm\n","Test the PromptTuning of ViT_base_timm_PuzzleTuning_SAE_promptstate_PromptDeep_20_401_lf15_finetuning_warwick_CLS\n","Prompt VPT type: Deep\n","in prompt model building, timm ViT loaded for base_state_dict\n","model forward cheacked\n","model is ready now!\n","model loaded\n","model : ViT_base_timm_PuzzleTuning_SAE_promptstate_PromptDeep_20_401_lf15_finetuning_warwick_CLS\n","*********************************setting*************************************\n","Namespace(model_idx='ViT_base_timm_PuzzleTuning_SAE_promptstate_PromptDeep_20_401_lf15_finetuning_warwick_CLS', drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, cls_token_off=False, pos_embedding_off=False, att_module='SimAM', gpu_idx=0, dataroot='/data/Pathology_Experiment/dataset/warwick_CLS', model_path='/home/Pathology_Experiment/saved_models', draw_root='/home/Pathology_Experiment/runs/SAE-timm-start_promptstate_VPT_finetuning_with_timm', model_path_by_hand=None, paint=True, enable_tensorboard=False, enable_attention_check=False, enable_visualize_check=False, data_augmentation_mode=3, PromptTuning='Deep', Prompt_Token_num=20, PromptUnFreeze=True, Pre_Trained_model_path=None, num_classes=0, edge_size=224, batch_size=1, check_minibatch=None)\n","Epoch: Test\n","----------\n","Epoch: test test index of 4 minibatch: 1 time used: 2.460519790649414\n","minibatch AVG loss: 0.0005482856868184172\n","Epoch: test test index of 4 minibatch: 2 time used: 0.05422377586364746\n","minibatch AVG loss: 0.028580010335645056\n","Epoch: test test index of 4 minibatch: 3 time used: 0.05495309829711914\n","minibatch AVG loss: 0.00031065010671227355\n","Epoch: test test index of 4 minibatch: 4 time used: 
0.05390286445617676\n","minibatch AVG loss: 0.0004322398812064421\n","Epoch: test test index of 4 minibatch: 5 time used: 0.05517458915710449\n","minibatch AVG loss: 0.12173168856133998\n","Epoch: test test index of 4 minibatch: 6 time used: 0.056728363037109375\n","minibatch AVG loss: 0.0001147254761235672\n","Epoch: test test index of 4 minibatch: 7 time used: 0.055054664611816406\n","minibatch AVG loss: 0.00036354226699586434\n","Epoch: test test index of 4 minibatch: 8 time used: 0.05412697792053223\n","minibatch AVG loss: 0.00014341383098326332\n","Epoch: test test index of 4 minibatch: 9 time used: 0.05386781692504883\n","minibatch AVG loss: 0.006190998042256979\n","Epoch: test test index of 4 minibatch: 10 time used: 0.05511641502380371\n","minibatch AVG loss: 0.003244622026613797\n","Epoch: test test index of 4 minibatch: 11 time used: 0.055419206619262695\n","minibatch AVG loss: 0.04551573219941929\n","Epoch: test test index of 4 minibatch: 12 time used: 0.0545496940612793\n","minibatch AVG loss: 0.07759279117362894\n","Epoch: test test index of 4 minibatch: 13 time used: 0.05438709259033203\n","minibatch AVG loss: 1.6769601333962783\n","Epoch: test test index of 4 minibatch: 14 time used: 0.0557103157043457\n","minibatch AVG loss: 0.021024980960646644\n","Epoch: test test index of 4 minibatch: 15 time used: 0.055322885513305664\n","minibatch AVG loss: 0.003789418142332579\n","Epoch: test test index of 4 minibatch: 16 time used: 0.05902862548828125\n","minibatch AVG loss: 0.0001979180588023155\n","Epoch: test test index of 4 minibatch: 17 time used: 0.057543039321899414\n","minibatch AVG loss: 0.003576442887720077\n","Epoch: test test index of 4 minibatch: 18 time used: 0.05739569664001465\n","minibatch AVG loss: 0.0008242135859291011\n","Epoch: test test index of 4 minibatch: 19 time used: 0.05773782730102539\n","minibatch AVG loss: 0.0002545509980791394\n","Epoch: test test index of 4 minibatch: 20 time used: 0.05846238136291504\n","minibatch AVG loss: 0.0012478914995881496\n","\n","Epoch: test \n","Loss: 0.0996 Acc: 98.7500\n","benign precision: 97.3684 recall: 100.0000\n","benign sensitivity: 100.0000 specificity: 97.6744\n","benign FPR: 2.3256 NPV: 100.0000\n","benign TP: 37.0\n","benign TN: 42.0\n","benign FP: 1.0\n","benign FN: 0.0\n","malignant precision: 100.0000 recall: 97.6744\n","malignant sensitivity: 97.6744 specificity: 100.0000\n","malignant FPR: 0.0000 NPV: 97.3684\n","malignant TP: 42.0\n","malignant TN: 37.0\n","malignant FP: 0.0\n","malignant FN: 1.0\n","\n","\n","Testing complete in 0m 4s\n"]}],"source":["!python Test.py --edge_size 224 --data_augmentation_mode 3 --model_idx ViT_base_timm_PuzzleTuning_SAE_promptstate_PromptDeep_20_401_lf15_finetuning_warwick_CLS --PromptTuning Deep --Prompt_Token_num 20 --PromptUnFreeze --dataroot /data/Pathology_Experiment/dataset/warwick_CLS --draw_root /home/Pathology_Experiment/runs/SAE-timm-start_promptstate_VPT_finetuning_with_timm --model_path /home/Pathology_Experiment/saved_models"]},{"cell_type":"markdown","metadata":{"id":"XX6Vjy9ec2b2"},"source":["# check the Tensorboard output"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"background_save":true},"id":"MWtYyRI1ff0q"},"outputs":[{"data":{"application/javascript":["\n"," (async () =\u003e {\n"," const url = new URL(await google.colab.kernel.proxyPort(6006, {'cache': true}));\n"," url.searchParams.set('tensorboardColab', 'true');\n"," const iframe = document.createElement('iframe');\n"," iframe.src = url;\n"," iframe.setAttribute('width', 
'100%');\n"," iframe.setAttribute('height', '800');\n"," iframe.setAttribute('frameborder', 0);\n"," document.body.appendChild(iframe);\n"," })();\n"," "],"text/plain":["\u003cIPython.core.display.Javascript object\u003e"]},"metadata":{},"output_type":"display_data"}],"source":["%load_ext tensorboard\n","%tensorboard --logdir '/home/Pathology_Experiment/runs'"]},{"cell_type":"markdown","metadata":{"id":"XSGgbUQ3E0H5"},"source":["# After the task, save the output to Google Drive\n"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"background_save":true},"id":"ZQlei_AVLknG"},"outputs":[{"name":"stdout","output_type":"stream","text":["usage: check_log_json.py [-h] [--ONE_LOG] [--draw_root DRAW_ROOT] [--record_dir RECORD_DIR]\n","check_log_json.py: error: unrecognized arguments: --enable_notify\n"]}],"source":["# change working dir\n","import os\n","os.chdir(\"/home/Pathology_Experiment/code/utils\")\n","!python check_log_json.py --enable_notify --draw_root /home/Pathology_Experiment/runs --record_dir /home/Pathology_Experiment/CSV_logs"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"background_save":true},"id":"_Wx0ymiiEuyS"},"outputs":[{"name":"stdout","output_type":"stream","text":["/bin/cp: target '/content/drive/MyDrive/Pathology_Experiment/runs/' is not a directory\n","runs copy completed!\n","/bin/cp: target '/content/drive/MyDrive/Pathology_Experiment/saved_models/' is not a directory\n","models copy completed!\n","/bin/cp: target '/content/drive/MyDrive/Pathology_Experiment/imaging_results/' is not a directory\n","imaging_results copy completed!\n"]}],"source":["# copy tensorboard runs\n","!/bin/cp -rf /home/Pathology_Experiment/runs/* /content/drive/MyDrive/Pathology_Experiment/runs/\n","print('runs copy completed!')\n","# copy the trained models\n","!/bin/cp -rf /home/Pathology_Experiment/saved_models/* /content/drive/MyDrive/Pathology_Experiment/saved_models/\n","print('models copy completed!')\n","# copy the imaging_results\n","!/bin/cp -rf /home/Pathology_Experiment/imaging_results/* /content/drive/MyDrive/Pathology_Experiment/imaging_results/\n","print('imaging_results copy completed!')"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"background_save":true},"id":"9lzAtLIhnGe5"},"outputs":[{"name":"stdout","output_type":"stream","text":["Sat Nov 25 08:22:43 AM UTC 2023\n"]}],"source":["!date --date='+8 hour' # CST time zone"]}],"metadata":{"accelerator":"GPU","colab":{"machine_shape":"hm","name":"","version":""},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0}
\ No newline at end of file
diff --git a/PuzzleTuning/PuzzleTuning.py b/PuzzleTuning/PuzzleTuning.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5084bfbf7188ade6bf4e746b6696c38759bdc99
--- /dev/null
+++ b/PuzzleTuning/PuzzleTuning.py
@@ -0,0 +1,455 @@
+"""
+Puzzle Tuning Script  ver: Feb 11th 14:00
+
+Paper:
+https://arxiv.org/abs/2311.06712
+Code:
+https://github.com/sagizty/PuzzleTuning
+Ref: MAE
+https://github.com/facebookresearch/mae
+
+Step 1: PreTraining on the ImageNet-1k style dataset (others)
+Step 2: Domain Prompt Tuning (PuzzleTuning) on Pathological Images (in ImageFolder)
+Step 3: FineTuning on the Downstream Tasks
+
+This is the training code for step 2
+
+
+Pre-training Experiments:
+DP (data-parallel bash)
+python PuzzleTuning.py --batch_size 64 --blr 1.5e-4 --epochs 200 --accum_iter 2 --print_freq 2000 --check_point_gap 50
+--input_size 224 --warmup_epochs 20 --pin_mem --num_workers 32 --strategy loop --PromptTuning Deep --basic_state_dict
+/data/saved_models/ViT_b16_224_Imagenet.pth
+--data_path /root/datasets/All
+
+DDP (distributed data-parallel bash) for one machine with 12 GPUs
+python -m torch.distributed.launch --nproc_per_node=12 --nnodes 1 --node_rank 0 PuzzleTuning.py --DDP_distributed
+--batch_size 64 --blr 1.5e-4 --epochs 200 --accum_iter 2 --print_freq 2000 --check_point_gap 50 --input_size 224
+--warmup_epochs 20 --pin_mem --num_workers 32 --strategy loop --PromptTuning Deep --basic_state_dict
+/data/saved_models/ViT_b16_224_Imagenet.pth
+--data_path /root/datasets/All
+
+
+update:
+Use the "--seg_decoder" parameter to introduce a segmentation network as the decoder:
+swin_unet for Swin-Unet
+"""
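+
+# NOTE: recent PyTorch releases deprecate `python -m torch.distributed.launch` in favour
+# of `torchrun`. Assuming misc.init_distributed_mode() follows the MAE reference code and
+# reads the standard elastic-launch environment variables (RANK, WORLD_SIZE, LOCAL_RANK),
+# the DDP recipe above would likely become (a sketch, not a command verified here):
+#
+#   torchrun --nproc_per_node=12 --nnodes=1 --node_rank=0 PuzzleTuning.py --DDP_distributed \
+#       <same training flags as the DDP bash above>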
+
+import argparse
+import datetime
+import json
+import numpy as np
+import os
+import time
+from pathlib import Path
+
+import torch
+import torch.backends.cudnn as cudnn
+from tensorboardX import SummaryWriter
+import torchvision.transforms as transforms
+import torchvision.datasets as datasets
+
+import timm
+
+# assert timm.__version__ == "0.3.2"  # version check
+import timm.optim.optim_factory as optim_factory
+
+import SSL_structures.misc as misc
+from SSL_structures.misc import NativeScalerWithGradNormCount as NativeScaler
+from utils.schedulers import patch_scheduler, ratio_scheduler
+
+from SSL_structures import models_mae, SAE
+
+from SSL_structures.engine_pretrain import train_one_epoch
+
+
+def main(args):
+    # choose encoder for timm
+    basic_encoder = args.model[4:]
+
+    # choose decoder version
+    args.model = args.model + '_decoder' if args.seg_decoder is not None else args.model
+    # note decoder
+    args.model_idx = args.model_idx + args.model + '_' + args.seg_decoder if args.seg_decoder is not None \
+        else args.model_idx + args.model
+    # note PromptTuning
+    args.model_idx = args.model_idx + '_Prompt_' + args.PromptTuning + '_tokennum_' + str(args.Prompt_Token_num) \
+        if args.PromptTuning is not None else args.model_idx
+
+    # fix the seed for reproducibility
+    if args.DDP_distributed:
+        misc.init_distributed_mode(args)
+        seed = args.seed + misc.get_rank()
+    else:
+        seed = args.seed
+    torch.manual_seed(seed)
+    np.random.seed(seed)
+
+    # set GPUs
+    cudnn.benchmark = True
+    device = torch.device(args.device)  # cuda
+
+    # simple augmentation
+    transform_train = transforms.Compose([
+        transforms.RandomResizedCrop(args.input_size, scale=(0.8, 1.0), interpolation=3, ratio=(1. / 1., 1. / 1.)),
+        # 3 is bicubic
+        # transforms.Resize(args.input_size),
+        transforms.RandomVerticalFlip(),
+        transforms.RandomHorizontalFlip(),
+        transforms.ToTensor(),
+        # transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+    ])
+
+    dataset_train = datasets.ImageFolder(os.path.join(args.data_path), transform=transform_train)  # , 'train'
+    print('dataset_train:', dataset_train)  # Train data
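+
+    # ImageFolder expects one sub-directory per "class" under args.data_path; in this
+    # self-supervised stage the labels are ignored, so any folder layout such as the
+    # hypothetical example below works (only the images themselves are used):
+    #
+    #   /root/datasets/All/
+    #       some_source_A/  img_0001.jpg, img_0002.jpg, ...
+    #       some_source_B/  img_0001.jpg, ...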
+ + # mae-vit-base-patch16 + model = models_mae.__dict__[args.model](img_size=args.input_size, norm_pix_loss=args.norm_pix_loss, + prompt_mode=args.PromptTuning, Prompt_Token_num=args.Prompt_Token_num, + basic_state_dict=basic_state_dict, dec_idx=args.seg_decoder) + # schedulers set to None: plain MAE, without the SAE puzzle curriculum + puzzle_patch_size_scheduler = None + fix_position_ratio_scheduler = None + + # PuzzleTuning + elif args.model[0:3] == 'sae': + if args.basic_state_dict is not None: + try: + if args.basic_state_dict == 'timm': + print("using timm") + basic_model = timm.create_model(basic_encoder + '_' + str(args.input_size), pretrained=True) + basic_state_dict = basic_model.state_dict() + else: + basic_state_dict = torch.load(args.basic_state_dict) + except: + print('error in args.basic_state_dict:', args.basic_state_dict) + if args.PromptTuning is not None: + print('In PromptTuning the basic_state_dict is required; none was usable, so the timm weight is loaded.\n') + # timm model name basic_encoder + basic_model = timm.create_model(basic_encoder + '_' + str(args.input_size), pretrained=True) + basic_state_dict = basic_model.state_dict() + else: + basic_state_dict = None + print('SAE restarts with an empty backbone') + else: + print('Puzzle tuning with Transfer-learning:', args.basic_state_dict) + else: + if args.PromptTuning is not None: + print('In PromptTuning the basic_state_dict is required; none was specified, so the timm weight is loaded.\n') + # timm model name basic_encoder + basic_model = timm.create_model(basic_encoder + '_' + str(args.input_size), pretrained=True) + basic_state_dict = basic_model.state_dict() + else: + basic_state_dict = None + print('Puzzle tuning with an empty backbone') + + model = SAE.__dict__[args.model](img_size=args.input_size, group_shuffle_size=args.group_shuffle_size, + norm_pix_loss=args.norm_pix_loss, + prompt_mode=args.PromptTuning, Prompt_Token_num=args.Prompt_Token_num, + basic_state_dict=basic_state_dict, dec_idx=args.seg_decoder) + + fix_position_ratio_scheduler = ratio_scheduler(total_epoches=args.epochs, + warmup_epochs=args.warmup_epochs, + basic_ratio=0.25, # start ratio + fix_position_ratio=args.fix_position_ratio, # None + strategy=args.strategy) + # strategy=None keeps the ratio fixed; otherwise the ratio is reduced gradually + + # puzzle patch size scheduler (the SAE puzzle curriculum; unused by plain MAE) + puzzle_patch_size_scheduler = patch_scheduler(total_epoches=args.epochs, + warmup_epochs=args.warmup_epochs, + edge_size=args.input_size, + basic_patch=model.patch_embed.patch_size[0], + fix_patch_size=args.fix_patch_size, # None + strategy=args.strategy) # 'linear' + # NOTICE: the same strategy sets up both the ratio-scheduler and the patch-scheduler + + else: + print('This tuning script only supports SAE (PuzzleTuning) or MAE') + return -1
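The two schedulers constructed above drive the curriculum: `ratio_scheduler` controls how many patch positions stay fixed per epoch, and `patch_scheduler` controls the puzzle patch size. The repo's real implementations live in the SSL_structures module; purely for illustration, a per-epoch curriculum of this kind can be sketched as follows (the function names and the end-ratio/candidate values here are hypothetical):

```python
# Hypothetical linear curriculum sketch: reduce the fix-position ratio and step up the
# puzzle patch size over training (the repo's ratio_scheduler/patch_scheduler may differ).
def linear_ratio_schedule(epoch, total_epochs, start_ratio=0.25, end_ratio=0.05):
    t = min(max(epoch / max(1, total_epochs - 1), 0.0), 1.0)   # progress in [0, 1]
    return start_ratio + t * (end_ratio - start_ratio)

def patch_size_schedule(epoch, total_epochs, candidates=(16, 32, 56, 112)):
    # step through ever-larger puzzle patch sizes as training progresses
    idx = min(int(len(candidates) * epoch / max(1, total_epochs)), len(candidates) - 1)
    return candidates[idx]

for epoch in (0, 100, 199):
    print(epoch, round(linear_ratio_schedule(epoch, 200), 3), patch_size_schedule(epoch, 200))
```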
+ # the effective batch size for setting up the lr + if args.DDP_distributed: + eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size() + else: + eff_batch_size = args.batch_size * torch.cuda.device_count() * args.accum_iter + print('eff_batch_size:', eff_batch_size) + + if args.lr is None: # when only base_lr is specified + args.lr = args.blr * eff_batch_size / 256 + + print("base lr: %.2e" % (args.lr * 256 / eff_batch_size)) + print("actual lr: %.2e" % args.lr) + + print("accumulate grad iterations: %d" % args.accum_iter) + print("effective batch size: %d" % eff_batch_size) + + # take the model parameters for the optimizer update + model_without_ddp = model + + if args.DDP_distributed: + model.cuda() # args.gpu is obtained by misc.py + # find_unused_parameters=True lets DDP correctly synchronize layers in back propagation + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True) + else: + model = torch.nn.DataParallel(model) + model.to(device) + + print("Model = %s" % str(model_without_ddp)) + + # following timm: set wd as 0 for bias and norm layers + param_groups = optim_factory.add_weight_decay(model_without_ddp, args.weight_decay) + optimizer = torch.optim.AdamW(param_groups, lr=args.lr, betas=(0.9, 0.95)) + print(optimizer) + + # loss scaler with gradient clipping + loss_scaler = NativeScaler(GPU_count=torch.cuda.device_count(), DDP_distributed=args.DDP_distributed) + + # if --resume is set, load the checkpoint and continue training; otherwise start a new training run + # the checkpoint should include the model, optimizer and loss_scaler information + misc.load_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler) + + # training by epochs + print(f"Start training for {args.epochs} epochs") + start_time = time.time() + + for epoch in range(args.start_epoch, args.epochs): + # use args.start_epoch to jump to the resumed checkpoint + + if enable_DistributedSampler: # DistributedSampler needs .set_epoch(epoch) at each epoch + data_loader_train.sampler.set_epoch(epoch) + + # training iterations + train_stats = train_one_epoch(model, data_loader_train, optimizer, device, epoch, loss_scaler, + fix_position_ratio_scheduler=fix_position_ratio_scheduler, + puzzle_patch_size_scheduler=puzzle_patch_size_scheduler, + check_samples=args.check_samples, + print_freq=args.print_freq, log_writer=log_writer, args=args) + + if args.output_dir and (epoch % args.check_point_gap == 0 or epoch + 1 == args.epochs): + misc.save_model(args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, + loss_scaler=loss_scaler, epoch=epoch, model_idx=args.model_idx) + + log_stats = {**{f'train_{k}': v for k, v in train_stats.items()}, + 'epoch': epoch, } + + # write the log + if args.output_dir and misc.is_main_process(): + if log_writer is not None: + log_writer.flush() + with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f: + f.write(json.dumps(log_stats) + "\n") + + # time stamp + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('Training time {}'.format(total_time_str))
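The `optim_factory.add_weight_decay` call above follows the timm convention of exempting bias and normalization parameters from weight decay. A rough, self-contained equivalent of that grouping (a sketch, not timm's exact implementation):

```python
# Rough equivalent of the timm-style grouping used above: bias and norm parameters
# (ndim <= 1) get weight_decay=0, everything else gets the configured decay.
import torch

def param_groups_weight_decay(model: torch.nn.Module, weight_decay: float):
    decay, no_decay = [], []
    for name, p in model.named_parameters():
        if not p.requires_grad:
            continue
        (no_decay if p.ndim <= 1 or name.endswith('.bias') else decay).append(p)
    return [{'params': no_decay, 'weight_decay': 0.0},
            {'params': decay, 'weight_decay': weight_decay}]

# toy usage with the same optimizer settings as the script
net = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.LayerNorm(8))
opt = torch.optim.AdamW(param_groups_weight_decay(net, 0.05), lr=1.5e-4, betas=(0.9, 0.95))
```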
+ + +def get_args_parser(): + parser = argparse.ArgumentParser('SAE pre-training', add_help=False) + + # disable_notify + parser.add_argument('--disable_notify', action='store_true', help='do not send tracking emails') + + # Model Name or index + parser.add_argument('--model_idx', default='PuzzleTuning_', type=str, help='Model Name or index') + + # Original MAE(224->64) MAE A100(224->256 384->128) SAE(224->128 384->64) SAE-VPT(224->256 384->128) + parser.add_argument('--batch_size', default=64, type=int, + help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus)') + parser.add_argument('--epochs', default=200, type=int) # originally 800 epochs + parser.add_argument('--accum_iter', default=2, type=int, + help='Accumulate gradient iterations ' + '(for increasing the effective batch size under memory constraints)') + + # if --resume is set, load the checkpoint and continue training; otherwise start a new training run + # the checkpoint should include the model, optimizer and loss_scaler information + parser.add_argument('--resume', default='', help='resume from checkpoint') + parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch of the checkpoint') + + # Model parameters: sae_vit_base_patch16 / mae_vit_base_patch16 + parser.add_argument('--model', default='sae_vit_base_patch16', type=str, metavar='MODEL', + help='Name of model to train') # ori mae_vit_large_patch16 + parser.add_argument('--seg_decoder', default=None, type=str, metavar='segmentation decoder', + help='Name of segmentation decoder') + + parser.add_argument('--input_size', default=224, type=int, help='images input size') + parser.add_argument('--model_patch_size', default=16, type=int, + help='model_patch_size, default 16 for ViT-base') + parser.add_argument('--num_classes', default=3, type=int, # decoder seg classes set to channels + help='the number of classes for segmentation') + + # MAE mask_ratio + parser.add_argument('--mask_ratio', default=0.75, type=float, + help='Masking ratio (percentage of removed patches)') + + # Tuning setting + # PromptTuning + parser.add_argument('--PromptTuning', default=None, type=str, + help='use the Prompt Tuning strategy (Deep/Shallow) instead of fine-tuning (None, by default)') + # Prompt_Token_num + parser.add_argument('--Prompt_Token_num', default=20, type=int, help='Prompt_Token_num for the VPT backbone') + + # Curriculum learning setting + parser.add_argument('--strategy', default=None, type=str, + help='use a linear or other puzzle size scheduler') + parser.add_argument('--fix_position_ratio', default=None, type=float, + help='ablation fix_position_ratio (percentage of position token patches)') + parser.add_argument('--fix_patch_size', default=None, type=int, help='ablation using a fixed patch size') + parser.add_argument('--group_shuffle_size', default=-1, type=int, help='group_shuffle_size of group shuffling, ' + 'default -1 for the whole batch as a group') + + # loss settings + parser.add_argument('--norm_pix_loss', action='store_true', + help='Use (per-patch) normalized pixels as targets for computing the loss') + parser.set_defaults(norm_pix_loss=False) + + # basic_state_dict + parser.add_argument('--basic_state_dict', default=None, type=str, + help='load a basic backbone state_dict for Transfer-learning-based tuning, default None') + + # Optimizer settings + parser.add_argument('--weight_decay', type=float, default=0.05, + help='weight decay (default: 0.05)') + + parser.add_argument('--lr', type=float, default=None, metavar='LR', + help='learning rate (absolute lr), default=None') + parser.add_argument('--blr', type=float, default=1.5e-4, metavar='LR', + help='base learning rate: absolute_lr = base_lr * effective batch size / 256') + parser.add_argument('--min_lr', type=float, default=0., metavar='LR', + help='lower lr bound for cyclic schedulers that hit 0') + + parser.add_argument('--warmup_epochs', type=int, default=20, metavar='N', + help='epochs to warm up the LR') + + # PATH settings + # Dataset parameters /datasets01/imagenet_full_size/061417/ /data/imagenet_1k /root/autodl-tmp/imagenet + parser.add_argument('--data_path', default='/root/autodl-tmp/datasets/All', type=str, help='dataset path') + parser.add_argument('--output_dir', default='/root/autodl-tmp/runs', + help='path where to save, empty for no saving') + parser.add_argument('--log_dir', default='/root/tf-logs', + help='path where to write the tensorboard log') + parser.add_argument('--device', default='cuda', + help='device to use for training / testing')
+ parser.add_argument('--seed', default=42, type=int) + + # dataloader setting + parser.add_argument('--num_workers', default=20, type=int) + # 4xA100(16, 384, b128, shm40) 6xA100(36, 384, b128, shm100) 8xA100(35, 384, b128, shm100) + parser.add_argument('--pin_mem', action='store_true', + help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') + parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem') + parser.set_defaults(pin_mem=True) + + # print_freq and checkpoint + parser.add_argument('--print_freq', default=20, type=int) + parser.add_argument('--check_point_gap', default=50, type=int) + parser.add_argument('--check_samples', default=1, type=int, help='how many images to check in a checking batch') + + # DDP_distributed training parameters for DDP + parser.add_argument('--world_size', default=1, type=int, + help='number of DDP_distributed processes') + parser.add_argument('--local_rank', default=-1, type=int) + parser.add_argument('--dist_on_itp', action='store_true') + parser.add_argument('--dist_url', default='env://', + help='url used to set up DDP_distributed training') + parser.add_argument('--DDP_distributed', action='store_true', help='Use DDP in training; ' + 'without this flag, DP will be applied') + + return parser + + +if __name__ == '__main__': + args = get_args_parser() + args = args.parse_args() + + if args.output_dir: + Path(args.output_dir).mkdir(parents=True, exist_ok=True) + + main(args) \ No newline at end of file
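The `--norm_pix_loss` option above follows the standard MAE recipe: instead of reconstructing raw pixels, each target patch is normalized by its own mean and variance before the MSE is computed. A short sketch of what that target transformation looks like (standard MAE formulation, for illustration; the repo computes this inside its model code):

```python
# Sketch of the --norm_pix_loss target: each flattened patch is normalized by its
# own pixel mean/variance before the reconstruction MSE (standard MAE formulation).
import torch

def patch_target(patches: torch.Tensor, norm_pix_loss: bool, eps: float = 1e-6):
    # patches: [B, N, patch_dim] flattened image patches
    if norm_pix_loss:
        mean = patches.mean(dim=-1, keepdim=True)
        var = patches.var(dim=-1, keepdim=True)
        patches = (patches - mean) / (var + eps) ** 0.5
    return patches

target = patch_target(torch.randn(2, 196, 768), norm_pix_loss=True)
```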
diff --git a/PuzzleTuning/README.md b/PuzzleTuning/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f7e2cc81f043affc9007d91cce8f85f01b78fbad --- /dev/null +++ b/PuzzleTuning/README.md @@ -0,0 +1,49 @@ +# PuzzleTuning +[[`arXiv`](https://arxiv.org/abs/2311.06712)] [[`Video Presentation`](https://www.bilibili.com/video/BV1ZVHje9EdX)] + +Pathological image analysis is a crucial field in computer vision. Due to the annotation scarcity in the pathological field, most recent works have leveraged self-supervised learning (SSL) on unlabeled pathological images, hoping to mine representations effectively. However, there are two core defects in current SSL-based pathological pre-training: (1) they do not explicitly explore the essential focuses of the pathological field, and (2) they do not effectively bridge with, and thus take advantage of, the knowledge from natural images. To address them explicitly, we propose our large-scale PuzzleTuning framework, containing the following innovations. Firstly, we define three task focuses that can effectively bridge the knowledge of the pathological and natural domains: appearance consistency, spatial consistency, and restoration understanding. Secondly, we devise a novel multiple-puzzle-restoring task, which explicitly pre-trains the model regarding these focuses. Thirdly, we introduce an explicit prompt-tuning process to incrementally integrate the domain-specific knowledge, bridging the large domain gap between natural and pathological images. Additionally, a curriculum-learning training strategy is designed to regulate task difficulty, making the model adaptive to the puzzle-restoring complexity. Experimental results show that our PuzzleTuning framework outperforms the previous state-of-the-art methods in various downstream tasks on multiple datasets. + +fig_concept +Samples illustrate the focuses and relationships in pathological images. They are pancreatic liquid samples (a and b) and colonic epithelium tissue samples (c and d) of normal (a and c) and cancer (b and d) conditions. Their patches are numbered from 1 to 9. Grouping the patches from each image as a bag and intermixing patches among the bags highlights the three pathological focuses of appearance consistency, spatial consistency, and restoration understanding. + +fig_PuzzleTuning_method +Overview of PuzzleTuning. Three steps are designed in PuzzleTuning: 1) Puzzle making, where the image batch is divided into bags of patches, and fix-position and relation identities are randomly assigned. The relation patches are then in-place shuffled with each other, making up the puzzle state. 2) Puzzle understanding, where puzzles regarding grouping, junction, and restoration relationships are learned by prompt tokens attached to the encoder. Through the prompt tokens, the pathological focuses are explicitly seamed with general vision knowledge. 3) Puzzle restoring, where the decoder restores the relation patches with the position patches as hints, under SSL supervision against the original images. + + +# Usage +## Pre-trained weights +We have released the pre-trained weights of PuzzleTuning and all counterparts at + +https://drive.google.com/file/d/1-mddejIdCRP5AscnlWAyEcGzfgBIRCSf/view?usp=share_link + +## Demo with Colab +We have released a demo for illustration at + +https://github.com/sagizty/PuzzleTuning/blob/main/PuzzleTuning%20Colab%20Demo.ipynb + +## Training script +```Shell +python -m torch.distributed.launch --nproc_per_node=8 --nnodes 1 --node_rank 0 PuzzleTuning.py --DDP_distributed --batch_size 64 --group_shuffle_size 8 --blr 1.5e-4 --epochs 2000 --accum_iter 2 --print_freq 5000 --check_point_gap 100 --input_size 224 --warmup_epochs 100 --pin_mem --num_workers 32 --strategy loop --PromptTuning Deep --basic_state_dict /home/saved_models/ViT_b16_224_Imagenet.pth --data_path /home/datasets/All +``` + +## CPIA dataset +https://github.com/zhanglab2021/CPIA_Dataset + +# Results +## Comparison +(result figures) + +## Domain bridging target +(result figure) + +## Domain bridging with Puzzles and Prompts +(result figures) + +## Curriculum learning +(result figures)
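The puzzle-making step described in the overview above (keep a subset of fix-position patches, shuffle the remaining relation patches in place across a group of images) can be sketched in a few lines. This is an illustrative reimplementation under stated assumptions, not the repo's SAE code; `make_puzzle` and its signature are hypothetical:

```python
# Illustrative puzzle-making sketch (not the repo's SAE implementation): keep a random
# subset of patch positions fixed and shuffle the remaining positions across the batch
# (the whole batch acts as one shuffle group, i.e. group_shuffle_size = -1).
import torch

def make_puzzle(patches: torch.Tensor, fix_position_ratio: float):
    # patches: [B, N, D] flattened patch embeddings
    B, N, D = patches.shape
    n_fix = int(N * fix_position_ratio)
    fix_idx = torch.randperm(N)[:n_fix]          # fix-position identities
    rel_mask = torch.ones(N, dtype=torch.bool)
    rel_mask[fix_idx] = False                    # relation identities
    puzzle = patches.clone()
    perm = torch.randperm(B)                     # in-place shuffle across images
    puzzle[:, rel_mask] = patches[perm][:, rel_mask]
    return puzzle, rel_mask

puzzle, rel_mask = make_puzzle(torch.randn(4, 196, 768), fix_position_ratio=0.25)
```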
diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/LICENSE b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..2ef27144b58b2accb943fda2cc282ca5ce407568 --- /dev/null +++ b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Jeya Maria Jose + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/README.md b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9a6693edd7612c5e739aa0734df2f856ddd35199 --- /dev/null +++ b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/README.md @@ -0,0 +1,152 @@ +# Medical-Transformer + +Pytorch code for the paper ["Medical Transformer: Gated Axial-Attention for Medical Image Segmentation"](https://arxiv.org/pdf/2102.10662.pdf), MICCAI 2021 + +[Paper](https://arxiv.org/pdf/2102.10662.pdf) | [Poster](https://drive.google.com/file/d/1gMjc5guT_dYQFT6TEEwdHAFKwG5XkEc9/view?usp=sharing) + +### About this repo: + +This repo hosts the code for the following networks: + +1) Gated Axial Attention U-Net +2) MedT + +## Introduction + +The majority of existing Transformer-based network architectures proposed for vision applications require large-scale datasets to train properly. However, compared to the datasets for vision applications, in medical imaging the number of data samples is relatively low, making it difficult to efficiently train transformers for medical applications. To this end, we propose a Gated Axial-Attention model which extends the existing architectures by introducing an additional control mechanism in the self-attention module. Furthermore, to train the model effectively on medical images, we propose a Local-Global training strategy (LoGo) which further improves the performance. Specifically, we operate on the whole image and on patches to learn global and local features, respectively. The proposed Medical Transformer (MedT) uses the LoGo training strategy on the Gated Axial Attention U-Net.
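The LoGo strategy described above runs the network on the whole image (global branch) and on patches (local branch). A toy sketch of that idea, using a stand-in 1x1-conv network purely for shape bookkeeping (hypothetical; MedT's actual branches are gated axial-attention networks and its fusion differs):

```python
# Toy LoGo sketch (hypothetical stand-in network): run a global branch on the full
# image and a local branch on non-overlapping crops, then fuse the two outputs.
import torch
import torch.nn as nn

class ToyLoGo(nn.Module):
    def __init__(self, channels=3, num_classes=2):
        super().__init__()
        self.global_branch = nn.Conv2d(channels, num_classes, kernel_size=1)
        self.local_branch = nn.Conv2d(channels, num_classes, kernel_size=1)

    def forward(self, x, grid=4):
        g = self.global_branch(x)                        # global features, full image
        B, C, H, W = x.shape
        ph, pw = H // grid, W // grid
        l = torch.zeros_like(g)
        for i in range(grid):                            # local features, per crop
            for j in range(grid):
                crop = x[:, :, i*ph:(i+1)*ph, j*pw:(j+1)*pw]
                l[:, :, i*ph:(i+1)*ph, j*pw:(j+1)*pw] = self.local_branch(crop)
        return g + l                                     # simple additive fusion

out = ToyLoGo()(torch.randn(1, 3, 128, 128))             # -> [1, 2, 128, 128]
```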

+(Figure: MedT network architecture) +

+ +### Using the code: + +- Clone this repository: +```bash +git clone https://github.com/jeya-maria-jose/Medical-Transformer +cd Medical-Transformer +``` + +The code is stable with Python 3.6.10 and PyTorch 1.4.0. + +To install all the dependencies using conda: + +```bash +conda env create -f environment.yml +conda activate medt +``` + +To install all the dependencies using pip: + +```bash +pip install -r requirements.txt +``` + +### Links for downloading the public Datasets: + +1) MoNuSeG Dataset - Link (Original) +2) GLAS Dataset - Link (Original) +3) The Brain Anatomy US dataset from the paper will be made public soon! + +## Using the Code for your dataset + +### Dataset Preparation + +Prepare the dataset in the following format for easy use of the code. The train and test folders should contain two subfolders each: img and labelcol. Make sure the images and their corresponding segmentation masks are placed under these folders and have the same names for easy correspondence. Please adapt the data loaders to your needs if you prefer not to prepare the dataset in this format. + +```bash +Train Folder----- + img---- + 0001.png + 0002.png + ....... + labelcol--- + 0001.png + 0002.png + ....... +Validation Folder----- + img---- + 0001.png + 0002.png + ....... + labelcol--- + 0001.png + 0002.png + ....... +Test Folder----- + img---- + 0001.png + 0002.png + ....... + labelcol--- + 0001.png + 0002.png + ....... +``` + +- The ground truth images should have pixels corresponding to the labels. Example: in case of binary segmentation, the pixels in the GT should be 0 or 255. + +### Training Command: + +```bash +python train.py --train_dataset "enter train directory" --val_dataset "enter validation directory" --direc 'path for results to be saved' --batch_size 4 --epoch 400 --save_freq 10 --modelname "gatedaxialunet" --learning_rate 0.001 --imgsize 128 --gray "no" +``` + +Change modelname to "MedT" or "logo" to train those variants. + +### Testing Command: + +```bash +python test.py --loaddirec "./saved_model_path/model_name.pth" --val_dataset "test dataset directory" --direc 'path for results to be saved' --batch_size 1 --modelname "gatedaxialunet" --imgsize 128 --gray "no" +``` + +The results, including the predicted segmentation maps, will be placed in the results folder along with the model weights. Run the performance metrics code in MATLAB to calculate the F1 score and mIoU. + +### Notes: + +1) Note that these experiments were conducted on an Nvidia Quadro 8000 with 48 GB memory. +2) The Google Colab code is an unofficial implementation for quick train/test. Please follow the original code for proper training. + +### Acknowledgement: + +The dataloader code is inspired by pytorch-UNet. The axial attention code is developed from axial-deeplab. + +# Citation: + +```bash +@InProceedings{jose2021medical, +author="Valanarasu, Jeya Maria Jose +and Oza, Poojan +and Hacihaliloglu, Ilker +and Patel, Vishal M.", +title="Medical Transformer: Gated Axial-Attention for Medical Image Segmentation", +booktitle="Medical Image Computing and Computer Assisted Intervention -- MICCAI 2021", +year="2021", +publisher="Springer International Publishing", +address="Cham", +pages="36--46", +isbn="978-3-030-87193-2" +} +``` + +Open an issue or mail me directly in case of any queries or suggestions.
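Given the img/labelcol folder layout above (matching filenames in both subfolders), a minimal PyTorch dataset might look like the following. This is a sketch for orientation, not the repo's actual dataloader; the class name and binarization threshold are assumptions:

```python
# Minimal sketch of a loader for the img/labelcol layout above (not the repo's dataloader).
import os
import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset

class ImgLabelcolDataset(Dataset):
    def __init__(self, root, imgsize=128):
        self.img_dir = os.path.join(root, 'img')
        self.label_dir = os.path.join(root, 'labelcol')
        self.names = sorted(os.listdir(self.img_dir))   # same names in both folders
        self.imgsize = imgsize

    def __len__(self):
        return len(self.names)

    def __getitem__(self, idx):
        name = self.names[idx]
        img = Image.open(os.path.join(self.img_dir, name)).convert('RGB')
        mask = Image.open(os.path.join(self.label_dir, name)).convert('L')
        img = img.resize((self.imgsize, self.imgsize))
        mask = mask.resize((self.imgsize, self.imgsize), Image.NEAREST)
        x = torch.from_numpy(np.array(img)).permute(2, 0, 1).float() / 255.0
        y = (torch.from_numpy(np.array(mask)) > 127).long()   # binary GT: 0 / 255
        return x, y
```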
diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/__init__.py b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/cmd.txt b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/cmd.txt new file mode 100644 index 0000000000000000000000000000000000000000..2437302889786ed5fcf084030aff8f1104e20166 --- /dev/null +++ b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/cmd.txt @@ -0,0 +1,2 @@ +python train.py --train_dataset "/media/jeyamariajose/7888230b-5c10-4229-90f2-c78bdae9c5de/Data/Brain_Ultrasound/Final/resized/train/" --val_dataset "/media/jeyamariajose/7888230b-5c10-4229-90f2-c78bdae9c5de/Data/Brain_Ultrasound/Final/resized/test/" --direc "./results/axial128_en/" --batch_size 4 --modelname "logo" --epoch 401 --save_freq 50 --learning_rate 0.0001 --imgsize 128 + diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/environment.yml b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/environment.yml new file mode 100644 index 0000000000000000000000000000000000000000..384ddd33b36321d13f85bd41d4b612535e36be0b --- /dev/null +++ b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/environment.yml @@ -0,0 +1,133 @@ +name: medt +channels: + - conda-forge + - defaults +dependencies: + - _libgcc_mutex=0.1=main + - argon2-cffi=20.1.0=py36h8c4c3a4_1 + - attrs=20.1.0=pyh9f0ad1d_0 + - backcall=0.2.0=pyh9f0ad1d_0 + - backports=1.0=py_2 + - backports.functools_lru_cache=1.6.1=py_0 + - blas=1.0=mkl + - bleach=3.1.5=pyh9f0ad1d_0 + - brotlipy=0.7.0=py36h8c4c3a4_1000 + - ca-certificates=2020.6.20=hecda079_0 + - certifi=2020.6.20=py36h9f0ad1d_0 + - cffi=1.11.5=py36_0 + - chardet=3.0.4=py36h9f0ad1d_1006 + - cryptography=3.1=py36h45558ae_0 + - decorator=4.4.2=py_0 + - defusedxml=0.6.0=py_0 + - entrypoints=0.3=py36h9f0ad1d_1001 + - idna=2.10=pyh9f0ad1d_0 + - importlib-metadata=1.7.0=py36h9f0ad1d_0 + - importlib_metadata=1.7.0=0 + - intel-openmp=2020.1=217 + - ipykernel=5.3.4=py36h95af2a2_0 + - ipython=7.16.1=py36h95af2a2_0 + - ipython_genutils=0.2.0=py_1 + - ipywidgets=7.5.1=py_0 + - jedi=0.17.2=py36h9f0ad1d_0 + - jinja2=2.11.2=pyh9f0ad1d_0 + - json5=0.9.4=pyh9f0ad1d_0 + - jsonschema=3.2.0=py36h9f0ad1d_1 + - jupyter_client=6.1.7=py_0 + - jupyter_core=4.6.3=py36h9f0ad1d_1 + - jupyterlab=2.2.6=py_0 + - jupyterlab_server=1.2.0=py_0 + - ld_impl_linux-64=2.33.1=h53a641e_7 + - libedit=3.1.20191231=h7b6447c_0 + - libffi=3.3=he6710b0_1 + - libgcc-ng=9.1.0=hdf63c60_0 + - libgfortran-ng=7.3.0=hdf63c60_0 + - libsodium=1.0.18=h516909a_0 + - libstdcxx-ng=9.1.0=hdf63c60_0 + - markupsafe=1.1.1=py36h8c4c3a4_1 + - mistune=0.8.4=py36h8c4c3a4_1001 + - mkl=2020.1=217 + - mkl-service=2.3.0=py36he904b0f_0 + - mkl_fft=1.1.0=py36h23d657b_0 + - mkl_random=1.1.1=py36h0573a6f_0 + - nbconvert=5.6.1=py36h9f0ad1d_1 + - nbformat=5.0.7=py_0 + - ncurses=6.2=he6710b0_1 + - notebook=6.1.3=py36h9f0ad1d_0 + - numpy=1.18.5=py36ha1c710e_0 + - numpy-base=1.18.5=py36hde5b4d6_0 + - openssl=1.1.1g=h516909a_1 + - packaging=20.4=pyh9f0ad1d_0 + - pandoc=2.10.1=h516909a_0 + - pandocfilters=1.4.2=py_1 + - parso=0.7.1=pyh9f0ad1d_0 + - pexpect=4.8.0=py36h9f0ad1d_1 + - pickleshare=0.7.5=py36h9f0ad1d_1001 + - pip=20.1.1=py36_1 + - 
prometheus_client=0.8.0=pyh9f0ad1d_0 + - prompt-toolkit=3.0.7=py_0 + - ptyprocess=0.6.0=py_1001 + - pycparser=2.20=pyh9f0ad1d_2 + - pygments=2.6.1=py_0 + - pyopenssl=19.1.0=py_1 + - pyparsing=2.4.7=pyh9f0ad1d_0 + - pyrsistent=0.16.0=py36h8c4c3a4_0 + - pysocks=1.7.1=py36h9f0ad1d_1 + - python=3.6.10=h7579374_2 + - python-dateutil=2.8.1=py_0 + - python_abi=3.6=1_cp36m + - pyzmq=19.0.2=py36h9947dbf_0 + - readline=8.0=h7b6447c_0 + - requests=2.24.0=pyh9f0ad1d_0 + - send2trash=1.5.0=py_0 + - setuptools=47.3.1=py36_0 + - six=1.15.0=py_0 + - sqlite=3.32.3=h62c20be_0 + - terminado=0.8.3=py36h9f0ad1d_1 + - testpath=0.4.4=py_0 + - tk=8.6.10=hbc83047_0 + - tornado=6.0.4=py36h8c4c3a4_1 + - traitlets=4.3.3=py36h9f0ad1d_1 + - urllib3=1.25.10=py_0 + - wcwidth=0.2.5=pyh9f0ad1d_1 + - webencodings=0.5.1=py_1 + - wheel=0.34.2=py36_0 + - widgetsnbextension=3.5.1=py36h9f0ad1d_1 + - xz=5.2.5=h7b6447c_0 + - yaml=0.2.5=h7b6447c_0 + - zeromq=4.3.2=he1b5a44_3 + - zipp=3.1.0=py_0 + - zlib=1.2.11=h7b6447c_3 + - pip: + - ci-info==0.2.0 + - click==7.1.2 + - cython==0.29.20 + - et-xmlfile==1.0.1 + - etelemetry==0.2.1 + - filelock==3.0.12 + - isodate==0.6.0 + - jdcal==1.4.1 + - joblib==0.17.0 + - lxml==4.5.1 + - matplotlib==3.3.2 + - medpy==0.4.0 + - natsort==7.0.1 + - nibabel==3.1.0 + - nipype==1.5.0 + - openpyxl==3.0.4 + - prov==1.5.3 + - pydicom==2.0.0 + - pydot==1.4.1 + - pydotplus==2.0.2 + - pynrrd==0.4.2 + - rdflib==5.0.0 + - scikit-learn==0.23.2 + - scipy==1.5.3 + - setproctitle==1.1.10 + - simplejson==3.17.0 + - threadpoolctl==2.1.0 + - torch==1.4.0 + - torch-dwconv==0.1.0 + - torchvision==0.4.0 + - traits==6.1.0 +prefix: /home/jeyamariajose/anaconda3/envs/medt + diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/extractors.py b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/extractors.py new file mode 100644 index 0000000000000000000000000000000000000000..3f79b12882940bcfc6c1b951cf95f2c5e7e0620d --- /dev/null +++ b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/extractors.py @@ -0,0 +1,373 @@ +from collections import OrderedDict +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils import model_zoo +from torchvision.models.densenet import densenet121, densenet161 +from torchvision.models.squeezenet import squeezenet1_1 + + +def load_weights_sequential(target, source_state): + new_dict = OrderedDict() + for (k1, v1), (k2, v2) in zip(target.state_dict().items(), source_state.items()): + new_dict[k1] = v2 + target.load_state_dict(new_dict) + +''' + Implementation of dilated ResNet-101 with deep supervision. 
Downsampling is changed to 8x +''' +model_urls = { + 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', + 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', + 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', + 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', + 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', +} + + +def conv3x3(in_planes, out_planes, stride=1, dilation=1): + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=dilation, dilation=dilation, bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride=stride, dilation=dilation) + self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes, stride=1, dilation=dilation) + self.bn2 = nn.BatchNorm2d(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1): + super(Bottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, dilation=dilation, + padding=dilation, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out
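The docstring above refers to the output stride: in the ResNet below, layers 3 and 4 keep stride 1 and compensate with dilation 2 and 4, so feature maps stop shrinking after an overall stride of 8 (a 224x224 input gives 28x28 features instead of the usual 7x7). A quick shape check, assuming this file is importable as a module named `extractors` (the module name here is an assumption):

```python
# Quick output-stride check for the dilated ResNet defined below: with layer3/layer4
# at stride 1 + dilation (2, 4), a 224x224 input yields 28x28 features (stride 8).
import torch

def check_output_stride(model):
    x = torch.randn(1, 3, 224, 224)
    feats, _ = model(x)                  # this file's forward returns (layer4, layer3)
    return 224 // feats.shape[-1]        # -> 8 for this dilated variant, 32 for stock

# usage (assuming this module is importable as `extractors`):
# from extractors import resnet50
# print(check_output_stride(resnet50(pretrained=False)))
```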
+ + +class ResNet(nn.Module): + def __init__(self, block, layers=(3, 4, 23, 3)): + self.inplanes = 64 + super(ResNet, self).__init__() + self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, + bias=False) + self.bn1 = nn.BatchNorm2d(64) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2) + # layer3/layer4 keep stride 1 and use dilation 2/4, so the overall output stride is 8 instead of 32 + self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2) + self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. / n)) + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + + def _make_layer(self, block, planes, blocks, stride=1, dilation=1): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(self.inplanes, planes * block.expansion, + kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(planes * block.expansion), + ) + + layers = [block(self.inplanes, planes, stride, downsample)] + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes, dilation=dilation)) + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x_3 = self.layer3(x) + x = self.layer4(x_3) + + return x, x_3 + + +''' + Implementation of DenseNet with deep supervision. Downsampling is changed to 8x +''' + + +class _DenseLayer(nn.Sequential): + def __init__(self, num_input_features, growth_rate, bn_size, drop_rate, index): + super(_DenseLayer, self).__init__() + self.add_module('norm1', nn.BatchNorm2d(num_input_features)), + self.add_module('relu1', nn.ReLU(inplace=True)), + if index == 3: + self.add_module('conv1', nn.Conv2d(num_input_features, bn_size * + growth_rate, kernel_size=1, stride=1, bias=False)), + self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)), + self.add_module('relu2', nn.ReLU(inplace=True)), + self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate, + kernel_size=3, stride=1, dilation=2, padding=2, bias=False)), + else: + self.add_module('conv1', nn.Conv2d(num_input_features, bn_size * + growth_rate, kernel_size=1, stride=1, bias=False)), + self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)), + self.add_module('relu2', nn.ReLU(inplace=True)), + self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate, + kernel_size=3, stride=1, padding=1, bias=False)), + self.drop_rate = drop_rate + + def forward(self, x): + new_features = super(_DenseLayer, self).forward(x) + if self.drop_rate > 0: + new_features = F.dropout(new_features, p=self.drop_rate, training=self.training) + return torch.cat([x, new_features], 1) + + +class _DenseBlock(nn.Sequential): + def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate, index): + super(_DenseBlock, self).__init__() + for i in range(num_layers): + layer = _DenseLayer(num_input_features + i * growth_rate, growth_rate, bn_size, drop_rate, index) + self.add_module('denselayer%d' % (i + 1), layer) + + +class _Transition(nn.Sequential): + def __init__(self, num_input_features, num_output_features, downsample=True): + super(_Transition, self).__init__() + self.add_module('norm', nn.BatchNorm2d(num_input_features)) + self.add_module('relu', nn.ReLU(inplace=True)) + self.add_module('conv', nn.Conv2d(num_input_features, num_output_features, + kernel_size=1, stride=1, bias=False)) + if downsample: + self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2)) + else: + self.add_module('pool', nn.AvgPool2d(kernel_size=1, stride=1)) # compatibility hack + + +class DenseNet(nn.Module): + def __init__(self, growth_rate=8, block_config=(6, 12, 24, 16), + num_init_features=16, bn_size=4, drop_rate=0, pretrained=False): + + super(DenseNet, self).__init__() + + # First convolution + self.start_features = nn.Sequential(OrderedDict([ + ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)), + 
('norm0', nn.BatchNorm2d(num_init_features)), + ('relu0', nn.ReLU(inplace=True)), + ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)), + ])) + + # Each denseblock + num_features = num_init_features + + init_weights = list(densenet121(pretrained=True).features.children()) + start = 0 + for i, c in enumerate(self.start_features.children()): + #if pretrained: + #c.load_state_dict(init_weights[i].state_dict()) + start += 1 + self.blocks = nn.ModuleList() + for i, num_layers in enumerate(block_config): + block = _DenseBlock(num_layers=num_layers, num_input_features=num_features, + bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate, index = i) + if pretrained: + block.load_state_dict(init_weights[start].state_dict()) + start += 1 + self.blocks.append(block) + setattr(self, 'denseblock%d' % (i + 1), block) + + num_features = num_features + num_layers * growth_rate + if i != len(block_config) - 1: + downsample = i < 1 + trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2, + downsample=downsample) + if pretrained: + trans.load_state_dict(init_weights[start].state_dict()) + start += 1 + self.blocks.append(trans) + setattr(self, 'transition%d' % (i + 1), trans) + num_features = num_features // 2 + + def forward(self, x): + out = self.start_features(x) + deep_features = None + for i, block in enumerate(self.blocks): + out = block(out) + if i == 5: + deep_features = out + + return out, deep_features + + +class Fire(nn.Module): + + def __init__(self, inplanes, squeeze_planes, + expand1x1_planes, expand3x3_planes, dilation=1): + super(Fire, self).__init__() + self.inplanes = inplanes + self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1) + self.squeeze_activation = nn.ReLU(inplace=True) + self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes, + kernel_size=1) + self.expand1x1_activation = nn.ReLU(inplace=True) + self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes, + kernel_size=3, padding=dilation, dilation=dilation) + self.expand3x3_activation = nn.ReLU(inplace=True) + + def forward(self, x): + x = self.squeeze_activation(self.squeeze(x)) + return torch.cat([ + self.expand1x1_activation(self.expand1x1(x)), + self.expand3x3_activation(self.expand3x3(x)) + ], 1) + + +class SqueezeNet(nn.Module): + + def __init__(self, pretrained=False): + super(SqueezeNet, self).__init__() + + self.feat_1 = nn.Sequential( + nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1), + nn.ReLU(inplace=True) + ) + self.feat_2 = nn.Sequential( + nn.MaxPool2d(kernel_size=3, stride=2, padding=1), + Fire(64, 16, 64, 64), + Fire(128, 16, 64, 64) + ) + self.feat_3 = nn.Sequential( + nn.MaxPool2d(kernel_size=3, stride=2, padding=1), + Fire(128, 32, 128, 128, 2), + Fire(256, 32, 128, 128, 2) + ) + self.feat_4 = nn.Sequential( + Fire(256, 48, 192, 192, 4), + Fire(384, 48, 192, 192, 4), + Fire(384, 64, 256, 256, 4), + Fire(512, 64, 256, 256, 4) + ) + if pretrained: + weights = squeezenet1_1(pretrained=True).features.state_dict() + load_weights_sequential(self, weights) + + def forward(self, x): + f1 = self.feat_1(x) + f2 = self.feat_2(f1) + f3 = self.feat_3(f2) + f4 = self.feat_4(f3) + return f4, f3 + + +''' + Handy methods for construction +''' + + +def squeezenet(pretrained=True): + return SqueezeNet(pretrained) + + +def densenet(pretrained=True): + return DenseNet(pretrained=pretrained) + + +def resnet18(pretrained=True): + model = ResNet(BasicBlock, [2, 2, 2, 2]) + if pretrained: + load_weights_sequential(model, 
model_zoo.load_url(model_urls['resnet18'])) + return model + + +def resnet34(pretrained=True): + model = ResNet(BasicBlock, [3, 4, 6, 3]) + if pretrained: + load_weights_sequential(model, model_zoo.load_url(model_urls['resnet34'])) + return model + + +def resnet50(pretrained=True): + model = ResNet(Bottleneck, [3, 4, 6, 3]) + if pretrained: + load_weights_sequential(model, model_zoo.load_url(model_urls['resnet50'])) + return model + + +def resnet101(pretrained=True): + model = ResNet(Bottleneck, [3, 4, 23, 3]) + if pretrained: + load_weights_sequential(model, model_zoo.load_url(model_urls['resnet101'])) + return model + + +def resnet152(pretrained=True): + model = ResNet(Bottleneck, [3, 8, 36, 3]) + if pretrained: + load_weights_sequential(model, model_zoo.load_url(model_urls['resnet152'])) + return model diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/img/arch.png b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/img/arch.png new file mode 100644 index 0000000000000000000000000000000000000000..d84791afdda09a0d95d9a6d312557dc09dd515d3 Binary files /dev/null and b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/img/arch.png differ diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/img/medt.png b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/img/medt.png new file mode 100644 index 0000000000000000000000000000000000000000..d84791afdda09a0d95d9a6d312557dc09dd515d3 Binary files /dev/null and b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/img/medt.png differ diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/img/medt1.png b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/img/medt1.png new file mode 100644 index 0000000000000000000000000000000000000000..c11b58abf88ecf2ea6e420cc62afe2683fc42e62 Binary files /dev/null and b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/img/medt1.png differ diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/img/poster.pdf b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/img/poster.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e1576b1762ac42d689b9b5cd21da333c5b66ac67 Binary files /dev/null and b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/img/poster.pdf differ diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__init__.py b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..05b870d489e93480b95ec238b0191c5b885d6b5d --- /dev/null +++ b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__init__.py @@ -0,0 +1,7 @@ +from .build_dataloader import build_dataloader +from .build_model import build_model +from .build_optimizer import build_optimizer +from .metrics import Metric + + +__all__ = ['build_dataloader', 'build_model', 'build_optimizer', 'Metric'] \ No newline at end of file diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__pycache__/__init__.cpython-36.pyc b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..626fa0a33129e07d3a3a79804b929d6e9a66700c Binary files /dev/null and b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__pycache__/__init__.cpython-36.pyc differ diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__pycache__/__init__.cpython-37.pyc b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26c2e4ceb062fc6e3085b0af00f8130c8964587a Binary files /dev/null and b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__pycache__/__init__.cpython-37.pyc differ diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__pycache__/build_dataloader.cpython-36.pyc b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__pycache__/build_dataloader.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..706c56560bc0b60b333e7040fd76aa7979ff1886 Binary files /dev/null and b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__pycache__/build_dataloader.cpython-36.pyc differ diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__pycache__/build_dataloader.cpython-37.pyc b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__pycache__/build_dataloader.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5b4e3a89a8e8242855c4ff186783033bbf5ae13 Binary files /dev/null and b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__pycache__/build_dataloader.cpython-37.pyc differ diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__pycache__/build_model.cpython-36.pyc b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__pycache__/build_model.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83e5488245584d9e5d964c9ed93b458eebde5944 Binary files /dev/null and b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__pycache__/build_model.cpython-36.pyc differ diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__pycache__/build_model.cpython-37.pyc b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__pycache__/build_model.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6f6e6686988405262a8ce25a5d476ad6d855e08 Binary files /dev/null and b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__pycache__/build_model.cpython-37.pyc differ diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__pycache__/build_optimizer.cpython-36.pyc b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__pycache__/build_optimizer.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a8a1808145d702defe9b066d17150ce20311e8d Binary files /dev/null and b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__pycache__/build_optimizer.cpython-36.pyc differ diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__pycache__/build_optimizer.cpython-37.pyc 
b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__pycache__/build_optimizer.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf340ff36aa067b16fa512eb56235adcb6a01453 Binary files /dev/null and b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__pycache__/build_optimizer.cpython-37.pyc differ diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__pycache__/metrics.cpython-36.pyc b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__pycache__/metrics.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ed2f6d0d01dd679f533519b2080ee0e3eb6bda4 Binary files /dev/null and b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__pycache__/metrics.cpython-36.pyc differ diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__pycache__/metrics.cpython-37.pyc b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__pycache__/metrics.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..768bc2fc98d8dd68175e0ac5733d08f5d82ca592 Binary files /dev/null and b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/__pycache__/metrics.cpython-37.pyc differ diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/build_dataloader.py b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/build_dataloader.py new file mode 100644 index 0000000000000000000000000000000000000000..4fd58b24afb9372545eb580955e2ebc14f1a90a4 --- /dev/null +++ b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/build_dataloader.py @@ -0,0 +1,5 @@ +from . import datasets + + +def build_dataloader(args, distributed=False): + return datasets.__dict__[args.dataset](args, distributed) diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/build_model.py b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/build_model.py new file mode 100644 index 0000000000000000000000000000000000000000..f8de6f781720d6db0174a40662941c15c6495808 --- /dev/null +++ b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/build_model.py @@ -0,0 +1,6 @@ +from . 
import models + + +def build_model(args): + model = models.__dict__[args.model](num_classes=args.num_classes) + return model diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/build_optimizer.py b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/build_optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..8ab69efc9f2285da5ab4619463e9153c379ce03a --- /dev/null +++ b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/build_optimizer.py @@ -0,0 +1,12 @@ +import torch.optim as optim + + +def build_optimizer(args, model): + if args.optim == 'sgd': + optimizer = optim.SGD(model.parameters(), lr=args.lr, + momentum=args.momentum, weight_decay=args.weight_decay, + nesterov=args.nesterov) + else: + raise AssertionError + return optimizer + diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/datasets/__init__.py b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7d4c38e378591d894a22335fe18dbd6b28c771ad --- /dev/null +++ b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/datasets/__init__.py @@ -0,0 +1,4 @@ +from .imagenet1k import imagenet1k + + +__all__ = ['imagenet1k'] diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/datasets/__pycache__/__init__.cpython-36.pyc b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/datasets/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90d03bfba52450b34e62841124c9637428d9ad79 Binary files /dev/null and b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/datasets/__pycache__/__init__.cpython-36.pyc differ diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/datasets/__pycache__/__init__.cpython-37.pyc b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/datasets/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb7ba1f516ffc97a54c04ddd89c3b87bd3ec615d Binary files /dev/null and b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/datasets/__pycache__/__init__.cpython-37.pyc differ diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/datasets/__pycache__/imagenet1k.cpython-36.pyc b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/datasets/__pycache__/imagenet1k.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..402f6db9c829f2ddcc802ce9e61f337e46e327ac Binary files /dev/null and b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/datasets/__pycache__/imagenet1k.cpython-36.pyc differ diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/datasets/__pycache__/imagenet1k.cpython-37.pyc b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/datasets/__pycache__/imagenet1k.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..79329fe2ca03ce87e888046a13d1c0f53cfa2b04 Binary files /dev/null and 
b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/datasets/__pycache__/imagenet1k.cpython-37.pyc differ diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/datasets/imagenet1k.py b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/datasets/imagenet1k.py new file mode 100644 index 0000000000000000000000000000000000000000..d4ed14df7e0ea16bc4554d96397220e2e7706883 --- /dev/null +++ b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/datasets/imagenet1k.py @@ -0,0 +1,56 @@ +import torch +import torchvision +from torchvision import datasets, transforms + + +def imagenet1k(args, distributed=False): + train_dirs = args.train_dirs + val_dirs = args.val_dirs + batch_size = args.batch_size + val_batch_size = args.val_batch_size + num_workers = args.num_workers + color_jitter = args.color_jitter + + normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + process = [ + transforms.RandomResizedCrop(224), + transforms.RandomHorizontalFlip(), + ] + if color_jitter: + process += [transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)] + process += [ + transforms.ToTensor(), + normalize + ] + + transform_train = transforms.Compose(process) + + train_set = datasets.ImageFolder(train_dirs, + transform=transform_train) + + if distributed: + train_sampler = torch.utils.data.distributed.DistributedSampler(train_set) + else: + train_sampler = None + + train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=(train_sampler is None), + sampler=train_sampler, num_workers=num_workers, pin_memory=True) + + transform_val = transforms.Compose( + [transforms.Resize(256), + transforms.CenterCrop(224), + transforms.ToTensor(), + normalize]) + + val_set = datasets.ImageFolder(root=val_dirs, + transform=transform_val) + + if distributed: + val_sampler = torch.utils.data.distributed.DistributedSampler(val_set) + else: + val_sampler = None + + val_loader = torch.utils.data.DataLoader(val_set, batch_size=val_batch_size, shuffle=False, + sampler=val_sampler, num_workers=num_workers, pin_memory=True) + + return train_loader, train_sampler, val_loader, val_sampler diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/metrics.py b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..67fc4a54501a825e1c51b9e3a074fc77c9e23342 --- /dev/null +++ b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/metrics.py @@ -0,0 +1,16 @@ +import torch + + +class Metric(object): + def __init__(self, name): + self.name = name + self.sum = torch.tensor(0.) + self.n = torch.tensor(0.) 
+ + def update(self, val): + self.sum += val.detach().cpu() + self.n += 1 + + @property + def avg(self): + return self.sum / self.n \ No newline at end of file diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/__init__.py b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ad5caac364d96adc7dc9bb3889d145ddb69aa8c6 --- /dev/null +++ b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/__init__.py @@ -0,0 +1,3 @@ +from .resnet import * +from .axialnet import * +from .myaxialnet import * diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/__pycache__/__init__.cpython-36.pyc b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec420ea1bdcbbe6dae943ef2bef72a8624ef3c64 Binary files /dev/null and b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/__pycache__/__init__.cpython-36.pyc differ diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/__pycache__/__init__.cpython-37.pyc b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e59002a9ac336d8aebbf4d9a4ff371a4f9a6aaee Binary files /dev/null and b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/__pycache__/__init__.cpython-37.pyc differ diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/__pycache__/axialnet.cpython-36.pyc b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/__pycache__/axialnet.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6445b4c9ce8026232e5c17efe90fa1ba6ca6961 Binary files /dev/null and b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/__pycache__/axialnet.cpython-36.pyc differ diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/__pycache__/axialnet.cpython-37.pyc b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/__pycache__/axialnet.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60cfba896fb19500c251cba7b07c40bc124cfdc9 Binary files /dev/null and b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/__pycache__/axialnet.cpython-37.pyc differ diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/__pycache__/resnet.cpython-36.pyc b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/__pycache__/resnet.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de0ac4e82f04f84742fb090d04a10b7bbe8a24f9 Binary files /dev/null and b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/__pycache__/resnet.cpython-36.pyc differ diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/__pycache__/resnet.cpython-37.pyc 
b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/__pycache__/resnet.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..620b283c43b9d4cbcd8eb5bc97d086fc29acff94 Binary files /dev/null and b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/__pycache__/resnet.cpython-37.pyc differ diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/__pycache__/utils.cpython-36.pyc b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/__pycache__/utils.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..732f01108110a689804a66f196680de7ae9a9e1e Binary files /dev/null and b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/__pycache__/utils.cpython-36.pyc differ diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/__pycache__/utils.cpython-37.pyc b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/__pycache__/utils.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..529fee973b2f9fb3a60ebae0703cbd540f59fd05 Binary files /dev/null and b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/__pycache__/utils.cpython-37.pyc differ diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/axialnet.py b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/axialnet.py new file mode 100644 index 0000000000000000000000000000000000000000..a258c82ae9c4b17dc2a429c67eac3e158128aaaa --- /dev/null +++ b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/axialnet.py @@ -0,0 +1,731 @@ +import pdb +import math +import torch +import torch.nn as nn +import torch.nn.functional as F +from .utils import * +import pdb +import matplotlib.pyplot as plt + +import random + + + +def conv1x1(in_planes, out_planes, stride=1): + """1x1 convolution""" + return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) + + +class AxialAttention(nn.Module): + def __init__(self, in_planes, out_planes, groups=8, kernel_size=56, + stride=1, bias=False, width=False): + assert (in_planes % groups == 0) and (out_planes % groups == 0) + super(AxialAttention, self).__init__() + self.in_planes = in_planes + self.out_planes = out_planes + self.groups = groups + self.group_planes = out_planes // groups + self.kernel_size = kernel_size + self.stride = stride + self.bias = bias + self.width = width + + # Multi-head self attention + self.qkv_transform = qkv_transform(in_planes, out_planes * 2, kernel_size=1, stride=1, + padding=0, bias=False) + self.bn_qkv = nn.BatchNorm1d(out_planes * 2) + self.bn_similarity = nn.BatchNorm2d(groups * 3) + + self.bn_output = nn.BatchNorm1d(out_planes * 2) + + # Position embedding + self.relative = nn.Parameter(torch.randn(self.group_planes * 2, kernel_size * 2 - 1), requires_grad=True) + query_index = torch.arange(kernel_size).unsqueeze(0) + key_index = torch.arange(kernel_size).unsqueeze(1) + relative_index = key_index - query_index + kernel_size - 1 + self.register_buffer('flatten_index', relative_index.view(-1)) + if stride > 1: + self.pooling = nn.AvgPool2d(stride, stride=stride) + + self.reset_parameters() + + def forward(self, x): + # pdb.set_trace() + if self.width: + 
x = x.permute(0, 2, 1, 3) + else: + x = x.permute(0, 3, 1, 2) # N, W, C, H + N, W, C, H = x.shape + x = x.contiguous().view(N * W, C, H) + + # Transformations + qkv = self.bn_qkv(self.qkv_transform(x)) + q, k, v = torch.split(qkv.reshape(N * W, self.groups, self.group_planes * 2, H), [self.group_planes // 2, self.group_planes // 2, self.group_planes], dim=2) + + # Calculate position embedding + all_embeddings = torch.index_select(self.relative, 1, self.flatten_index).view(self.group_planes * 2, self.kernel_size, self.kernel_size) + q_embedding, k_embedding, v_embedding = torch.split(all_embeddings, [self.group_planes // 2, self.group_planes // 2, self.group_planes], dim=0) + + qr = torch.einsum('bgci,cij->bgij', q, q_embedding) + kr = torch.einsum('bgci,cij->bgij', k, k_embedding).transpose(2, 3) + + qk = torch.einsum('bgci, bgcj->bgij', q, k) + + stacked_similarity = torch.cat([qk, qr, kr], dim=1) + stacked_similarity = self.bn_similarity(stacked_similarity).view(N * W, 3, self.groups, H, H).sum(dim=1) + #stacked_similarity = self.bn_qr(qr) + self.bn_kr(kr) + self.bn_qk(qk) + # (N, groups, H, H, W) + similarity = F.softmax(stacked_similarity, dim=3) + sv = torch.einsum('bgij,bgcj->bgci', similarity, v) + sve = torch.einsum('bgij,cij->bgci', similarity, v_embedding) + stacked_output = torch.cat([sv, sve], dim=-1).view(N * W, self.out_planes * 2, H) + output = self.bn_output(stacked_output).view(N, W, self.out_planes, 2, H).sum(dim=-2) + + if self.width: + output = output.permute(0, 2, 1, 3) + else: + output = output.permute(0, 2, 3, 1) + + if self.stride > 1: + output = self.pooling(output) + + return output + + def reset_parameters(self): + self.qkv_transform.weight.data.normal_(0, math.sqrt(1. / self.in_planes)) + #nn.init.uniform_(self.relative, -0.1, 0.1) + nn.init.normal_(self.relative, 0., math.sqrt(1. 
/ self.group_planes)) + +class AxialAttention_dynamic(nn.Module): + def __init__(self, in_planes, out_planes, groups=8, kernel_size=56, + stride=1, bias=False, width=False): + assert (in_planes % groups == 0) and (out_planes % groups == 0) + super(AxialAttention_dynamic, self).__init__() + self.in_planes = in_planes + self.out_planes = out_planes + self.groups = groups + self.group_planes = out_planes // groups + self.kernel_size = kernel_size + self.stride = stride + self.bias = bias + self.width = width + + # Multi-head self attention + self.qkv_transform = qkv_transform(in_planes, out_planes * 2, kernel_size=1, stride=1, + padding=0, bias=False) + self.bn_qkv = nn.BatchNorm1d(out_planes * 2) + self.bn_similarity = nn.BatchNorm2d(groups * 3) + self.bn_output = nn.BatchNorm1d(out_planes * 2) + + # Priority on encoding + + ## Initial values + + self.f_qr = nn.Parameter(torch.tensor(0.1), requires_grad=False) + self.f_kr = nn.Parameter(torch.tensor(0.1), requires_grad=False) + self.f_sve = nn.Parameter(torch.tensor(0.1), requires_grad=False) + self.f_sv = nn.Parameter(torch.tensor(1.0), requires_grad=False) + + + # Position embedding + self.relative = nn.Parameter(torch.randn(self.group_planes * 2, kernel_size * 2 - 1), requires_grad=True) + query_index = torch.arange(kernel_size).unsqueeze(0) + key_index = torch.arange(kernel_size).unsqueeze(1) + relative_index = key_index - query_index + kernel_size - 1 + self.register_buffer('flatten_index', relative_index.view(-1)) + if stride > 1: + self.pooling = nn.AvgPool2d(stride, stride=stride) + + self.reset_parameters() + # self.print_para() + + def forward(self, x): + if self.width: + x = x.permute(0, 2, 1, 3) + else: + x = x.permute(0, 3, 1, 2) # N, W, C, H + N, W, C, H = x.shape + x = x.contiguous().view(N * W, C, H) + + # Transformations + qkv = self.bn_qkv(self.qkv_transform(x)) + q, k, v = torch.split(qkv.reshape(N * W, self.groups, self.group_planes * 2, H), [self.group_planes // 2, self.group_planes // 2, self.group_planes], dim=2) + + # Calculate position embedding + all_embeddings = torch.index_select(self.relative, 1, self.flatten_index).view(self.group_planes * 2, self.kernel_size, self.kernel_size) + q_embedding, k_embedding, v_embedding = torch.split(all_embeddings, [self.group_planes // 2, self.group_planes // 2, self.group_planes], dim=0) + qr = torch.einsum('bgci,cij->bgij', q, q_embedding) + kr = torch.einsum('bgci,cij->bgij', k, k_embedding).transpose(2, 3) + qk = torch.einsum('bgci, bgcj->bgij', q, k) + + + # multiply by factors + qr = torch.mul(qr, self.f_qr) + kr = torch.mul(kr, self.f_kr) + + stacked_similarity = torch.cat([qk, qr, kr], dim=1) + stacked_similarity = self.bn_similarity(stacked_similarity).view(N * W, 3, self.groups, H, H).sum(dim=1) + #stacked_similarity = self.bn_qr(qr) + self.bn_kr(kr) + self.bn_qk(qk) + # (N, groups, H, H, W) + similarity = F.softmax(stacked_similarity, dim=3) + sv = torch.einsum('bgij,bgcj->bgci', similarity, v) + sve = torch.einsum('bgij,cij->bgci', similarity, v_embedding) + + # multiply by factors + sv = torch.mul(sv, self.f_sv) + sve = torch.mul(sve, self.f_sve) + + stacked_output = torch.cat([sv, sve], dim=-1).view(N * W, self.out_planes * 2, H) + output = self.bn_output(stacked_output).view(N, W, self.out_planes, 2, H).sum(dim=-2) + + if self.width: + output = output.permute(0, 2, 1, 3) + else: + output = output.permute(0, 2, 3, 1) + + if self.stride > 1: + output = self.pooling(output) + + return output + def reset_parameters(self): + self.qkv_transform.weight.data.normal_(0, 
math.sqrt(1. / self.in_planes)) + #nn.init.uniform_(self.relative, -0.1, 0.1) + nn.init.normal_(self.relative, 0., math.sqrt(1. / self.group_planes)) + +class AxialAttention_wopos(nn.Module): + def __init__(self, in_planes, out_planes, groups=8, kernel_size=56, + stride=1, bias=False, width=False): + assert (in_planes % groups == 0) and (out_planes % groups == 0) + super(AxialAttention_wopos, self).__init__() + self.in_planes = in_planes + self.out_planes = out_planes + self.groups = groups + self.group_planes = out_planes // groups + self.kernel_size = kernel_size + self.stride = stride + self.bias = bias + self.width = width + + # Multi-head self attention + self.qkv_transform = qkv_transform(in_planes, out_planes * 2, kernel_size=1, stride=1, + padding=0, bias=False) + self.bn_qkv = nn.BatchNorm1d(out_planes * 2) + self.bn_similarity = nn.BatchNorm2d(groups ) + + self.bn_output = nn.BatchNorm1d(out_planes * 1) + + if stride > 1: + self.pooling = nn.AvgPool2d(stride, stride=stride) + + self.reset_parameters() + + def forward(self, x): + if self.width: + x = x.permute(0, 2, 1, 3) + else: + x = x.permute(0, 3, 1, 2) # N, W, C, H + N, W, C, H = x.shape + x = x.contiguous().view(N * W, C, H) + + # Transformations + qkv = self.bn_qkv(self.qkv_transform(x)) + q, k, v = torch.split(qkv.reshape(N * W, self.groups, self.group_planes * 2, H), [self.group_planes // 2, self.group_planes // 2, self.group_planes], dim=2) + + qk = torch.einsum('bgci, bgcj->bgij', q, k) + + stacked_similarity = self.bn_similarity(qk).reshape(N * W, 1, self.groups, H, H).sum(dim=1).contiguous() + + similarity = F.softmax(stacked_similarity, dim=3) + sv = torch.einsum('bgij,bgcj->bgci', similarity, v) + + sv = sv.reshape(N*W,self.out_planes * 1, H).contiguous() + output = self.bn_output(sv).reshape(N, W, self.out_planes, 1, H).sum(dim=-2).contiguous() + + + if self.width: + output = output.permute(0, 2, 1, 3) + else: + output = output.permute(0, 2, 3, 1) + + if self.stride > 1: + output = self.pooling(output) + + return output + + def reset_parameters(self): + self.qkv_transform.weight.data.normal_(0, math.sqrt(1. / self.in_planes)) + #nn.init.uniform_(self.relative, -0.1, 0.1) + # nn.init.normal_(self.relative, 0., math.sqrt(1. 
/ self.group_planes)) + +# end of attn definition + +class AxialBlock(nn.Module): + expansion = 2 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None, kernel_size=56): + super(AxialBlock, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + width = int(planes * (base_width / 64.)) + # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv_down = conv1x1(inplanes, width) + self.bn1 = norm_layer(width) + self.hight_block = AxialAttention(width, width, groups=groups, kernel_size=kernel_size) + self.width_block = AxialAttention(width, width, groups=groups, kernel_size=kernel_size, stride=stride, width=True) + self.conv_up = conv1x1(width, planes * self.expansion) + self.bn2 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv_down(x) # 1x1 conv, channel reduction: inplanes -> width + out = self.bn1(out) + out = self.relu(out) + # print(out.shape) + out = self.hight_block(out) + out = self.width_block(out) + out = self.relu(out) + + out = self.conv_up(out) # width -> planes * 2 (expansion) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + +class AxialBlock_dynamic(nn.Module): + expansion = 2 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None, kernel_size=56): + super(AxialBlock_dynamic, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + width = int(planes * (base_width / 64.)) + # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv_down = conv1x1(inplanes, width) + self.bn1 = norm_layer(width) + self.hight_block = AxialAttention_dynamic(width, width, groups=groups, kernel_size=kernel_size) + self.width_block = AxialAttention_dynamic(width, width, groups=groups, kernel_size=kernel_size, stride=stride, width=True) + self.conv_up = conv1x1(width, planes * self.expansion) + self.bn2 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv_down(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.hight_block(out) + out = self.width_block(out) + out = self.relu(out) + + out = self.conv_up(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + +class AxialBlock_wopos(nn.Module): + expansion = 2 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None, kernel_size=56): + super(AxialBlock_wopos, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + # print(kernel_size) + width = int(planes * (base_width / 64.)) + # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv_down = conv1x1(inplanes, width) + self.conv1 = nn.Conv2d(width, width, kernel_size = 1) + self.bn1 = norm_layer(width) + self.hight_block = AxialAttention_wopos(width, width, groups=groups, kernel_size=kernel_size) + self.width_block = AxialAttention_wopos(width, width, groups=groups, kernel_size=kernel_size, stride=stride, width=True) + self.conv_up = conv1x1(width, planes * self.expansion) + self.bn2 = 
norm_layer(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + # pdb.set_trace() + + out = self.conv_down(x) + out = self.bn1(out) + out = self.relu(out) + # print(out.shape) + out = self.hight_block(out) + out = self.width_block(out) + + out = self.relu(out) + + out = self.conv_up(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +#end of block definition + + +class ResAxialAttentionUNet(nn.Module): + + def __init__(self, block, layers, num_classes=2, zero_init_residual=True, + groups=8, width_per_group=64, replace_stride_with_dilation=None, + norm_layer=None, s=0.125, img_size = 128,imgchan = 3): + super(ResAxialAttentionUNet, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + self._norm_layer = norm_layer + + self.inplanes = int(64 * s) + self.dilation = 1 + if replace_stride_with_dilation is None: + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + self.groups = groups + self.base_width = width_per_group + self.conv1 = nn.Conv2d(imgchan, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + self.conv2 = nn.Conv2d(self.inplanes, 128, kernel_size=3, stride=1, padding=1, bias=False) + self.conv3 = nn.Conv2d(128, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1 = norm_layer(self.inplanes) + self.bn2 = norm_layer(128) + self.bn3 = norm_layer(self.inplanes) + self.relu = nn.ReLU(inplace=True) + # self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, int(128 * s), layers[0], kernel_size= (img_size//2)) + self.layer2 = self._make_layer(block, int(256 * s), layers[1], stride=2, kernel_size=(img_size//2), + dilate=replace_stride_with_dilation[0]) + self.layer3 = self._make_layer(block, int(512 * s), layers[2], stride=2, kernel_size=(img_size//4), + dilate=replace_stride_with_dilation[1]) + self.layer4 = self._make_layer(block, int(1024 * s), layers[3], stride=2, kernel_size=(img_size//8), + dilate=replace_stride_with_dilation[2]) + + # Decoder + self.decoder1 = nn.Conv2d(int(1024 *2*s) , int(1024*2*s), kernel_size=3, stride=2, padding=1) + self.decoder2 = nn.Conv2d(int(1024 *2*s) , int(1024*s), kernel_size=3, stride=1, padding=1) + self.decoder3 = nn.Conv2d(int(1024*s), int(512*s), kernel_size=3, stride=1, padding=1) + self.decoder4 = nn.Conv2d(int(512*s) , int(256*s), kernel_size=3, stride=1, padding=1) + self.decoder5 = nn.Conv2d(int(256*s) , int(128*s) , kernel_size=3, stride=1, padding=1) + self.adjust = nn.Conv2d(int(128*s) , num_classes, kernel_size=1, stride=1, padding=0) + self.soft = nn.Softmax(dim=1) + + + def _make_layer(self, block, planes, blocks, kernel_size=56, stride=1, dilate=False): + norm_layer = self._norm_layer + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample, groups=self.groups, + base_width=self.base_width, dilation=previous_dilation, + 
norm_layer=norm_layer, kernel_size=kernel_size)) + self.inplanes = planes * block.expansion + if stride != 1: + kernel_size = kernel_size // 2 + + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, groups=self.groups, + base_width=self.base_width, dilation=self.dilation, + norm_layer=norm_layer, kernel_size=kernel_size)) + + return nn.Sequential(*layers) + + def _forward_impl(self, x): + + # AxialAttention Encoder + # pdb.set_trace() + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.conv2(x) + x = self.bn2(x) + x = self.relu(x) + x = self.conv3(x) + x = self.bn3(x) + x = self.relu(x) + + x1 = self.layer1(x) + + x2 = self.layer2(x1) + # print(x2.shape) + x3 = self.layer3(x2) + # print(x3.shape) + x4 = self.layer4(x3) + + x = F.relu(F.interpolate(self.decoder1(x4), scale_factor=(2,2), mode ='bilinear')) + x = torch.add(x, x4) + x = F.relu(F.interpolate(self.decoder2(x) , scale_factor=(2,2), mode ='bilinear')) + x = torch.add(x, x3) + x = F.relu(F.interpolate(self.decoder3(x) , scale_factor=(2,2), mode ='bilinear')) + x = torch.add(x, x2) + x = F.relu(F.interpolate(self.decoder4(x) , scale_factor=(2,2), mode ='bilinear')) + x = torch.add(x, x1) + x = F.relu(F.interpolate(self.decoder5(x) , scale_factor=(2,2), mode ='bilinear')) + x = self.adjust(F.relu(x)) + # pdb.set_trace() + return x + + def forward(self, x): + return self._forward_impl(x) + +class medt_net(nn.Module): + + def __init__(self, block, block_2, layers, num_classes=2, zero_init_residual=True, + groups=8, width_per_group=64, replace_stride_with_dilation=None, + norm_layer=None, s=0.125, img_size = 128,imgchan = 3): + super(medt_net, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + self._norm_layer = norm_layer + + self.inplanes = int(64 * s) # 64*0.125=8 + self.dilation = 1 + if replace_stride_with_dilation is None: + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + self.groups = groups # 8 + self.base_width = width_per_group # 64 + self.conv1 = nn.Conv2d(imgchan, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) # output size: (h - 7 + 2*3)/2 + 1 = h/2 + self.conv2 = nn.Conv2d(self.inplanes, 128, kernel_size=3, stride=1, padding=1, bias=False) # spatial size unchanged + self.conv3 = nn.Conv2d(128, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) # spatial size unchanged + self.bn1 = norm_layer(self.inplanes) + self.bn2 = norm_layer(128) + self.bn3 = norm_layer(self.inplanes) + # self.conv1 = nn.Conv2d(1, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1 = norm_layer(self.inplanes) + self.relu = nn.ReLU(inplace=True) + # self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, int(128 * s), layers[0], kernel_size= (img_size//2)) + self.layer2 = self._make_layer(block, int(256 * s), layers[1], stride=2, kernel_size=(img_size//2), + dilate=replace_stride_with_dilation[0]) + # self.layer3 = self._make_layer(block, int(512 * s), layers[2], stride=2, kernel_size=(img_size//4), + # dilate=replace_stride_with_dilation[1]) + # self.layer4 = self._make_layer(block, int(1024 * s), layers[3], stride=2, kernel_size=(img_size//8), + # dilate=replace_stride_with_dilation[2]) + + # Decoder + # self.decoder1 = nn.Conv2d(int(1024 *2*s) , int(1024*2*s), kernel_size=3, stride=2, padding=1) + # self.decoder2 = nn.Conv2d(int(1024 *2*s) , int(1024*s), 
kernel_size=3, stride=1, padding=1) + # self.decoder3 = nn.Conv2d(int(1024*s), int(512*s), kernel_size=3, stride=1, padding=1) + self.decoder4 = nn.Conv2d(int(512*s) , int(256*s), kernel_size=3, stride=1, padding=1) + self.decoder5 = nn.Conv2d(int(256*s) , int(128*s) , kernel_size=3, stride=1, padding=1) + self.adjust = nn.Conv2d(int(128*s) , num_classes, kernel_size=1, stride=1, padding=0) + self.soft = nn.Softmax(dim=1) + + + self.conv1_p = nn.Conv2d(imgchan, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + self.conv2_p = nn.Conv2d(self.inplanes,128, kernel_size=3, stride=1, padding=1, + bias=False) + self.conv3_p = nn.Conv2d(128, self.inplanes, kernel_size=3, stride=1, padding=1, + bias=False) + # self.conv1 = nn.Conv2d(1, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1_p = norm_layer(self.inplanes) + self.bn2_p = norm_layer(128) + self.bn3_p = norm_layer(self.inplanes) + + self.relu_p = nn.ReLU(inplace=True) + + img_size_p = img_size // 4 + + self.layer1_p = self._make_layer(block_2, int(128 * s), layers[0], kernel_size= (img_size_p//2)) + self.layer2_p = self._make_layer(block_2, int(256 * s), layers[1], stride=2, kernel_size=(img_size_p//2), + dilate=replace_stride_with_dilation[0]) + self.layer3_p = self._make_layer(block_2, int(512 * s), layers[2], stride=2, kernel_size=(img_size_p//4), + dilate=replace_stride_with_dilation[1]) + self.layer4_p = self._make_layer(block_2, int(1024 * s), layers[3], stride=2, kernel_size=(img_size_p//8), + dilate=replace_stride_with_dilation[2]) + + # Decoder + self.decoder1_p = nn.Conv2d(int(1024 *2*s) , int(1024*2*s), kernel_size=3, stride=2, padding=1) + self.decoder2_p = nn.Conv2d(int(1024 *2*s) , int(1024*s), kernel_size=3, stride=1, padding=1) + self.decoder3_p = nn.Conv2d(int(1024*s), int(512*s), kernel_size=3, stride=1, padding=1) + self.decoder4_p = nn.Conv2d(int(512*s) , int(256*s), kernel_size=3, stride=1, padding=1) + self.decoder5_p = nn.Conv2d(int(256*s) , int(128*s) , kernel_size=3, stride=1, padding=1) + + self.decoderf = nn.Conv2d(int(128*s) , int(128*s) , kernel_size=3, stride=1, padding=1) + self.adjust_p = nn.Conv2d(int(128*s) , num_classes, kernel_size=1, stride=1, padding=0) + self.soft_p = nn.Softmax(dim=1) + + + def _make_layer(self, block, planes, blocks, kernel_size=56, stride=1, dilate=False): + norm_layer = self._norm_layer + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample, groups=self.groups, + base_width=self.base_width, dilation=previous_dilation, + norm_layer=norm_layer, kernel_size=kernel_size)) + self.inplanes = planes * block.expansion + if stride != 1: + kernel_size = kernel_size // 2 + + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, groups=self.groups, + base_width=self.base_width, dilation=self.dilation, + norm_layer=norm_layer, kernel_size=kernel_size)) + + return nn.Sequential(*layers) + + def _forward_impl(self, x): + + xin = x.clone() + x = self.conv1(x) # 3-> inplanes + x = self.bn1(x) + x = self.relu(x) + x = self.conv2(x) # inplanes -> 128 + x = self.bn2(x) + x = self.relu(x) + x = self.conv3(x) # 128 -> inplanes + x = self.bn3(x) + # x = F.max_pool2d(x,2,2) + x = self.relu(x) + # print('x shape:', x.shape) + 
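+        # Global branch: the full image runs through layer1/layer2 and decoders 4-5;
+        # the patch branch below re-encodes every 32x32 crop of the input with the
+        # *_p layers, and both results are fused via decoderf before self.adjust.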
+ + # x = self.maxpool(x) + # pdb.set_trace() + x1 = self.layer1(x) # inplanes -> 128*s*2; inside _make_layer, inplanes is updated to planes * 2 (expansion) + # print(x1.shape) + x2 = self.layer2(x1) # 128*s*2 -> 256*s*2; inplanes: 256*s -> 256*s*2 + # print(x2.shape) + # x3 = self.layer3(x2) + # # print(x3.shape) + # x4 = self.layer4(x3) + # # print(x4.shape) + # x = F.relu(F.interpolate(self.decoder1(x4), scale_factor=(2,2), mode ='bilinear')) + # x = torch.add(x, x4) + # x = F.relu(F.interpolate(self.decoder2(x4) , scale_factor=(2,2), mode ='bilinear')) + # x = torch.add(x, x3) + # x = F.relu(F.interpolate(self.decoder3(x3) , scale_factor=(2,2), mode ='bilinear')) + # x = torch.add(x, x2) + x = F.relu(F.interpolate(self.decoder4(x2) , scale_factor=(2,2), mode ='bilinear')) + x = torch.add(x, x1) + x = F.relu(F.interpolate(self.decoder5(x) , scale_factor=(2,2), mode ='bilinear')) # 256*s->128*s->256*s + # print(x.shape) + + # end of full image training + + # y_out = torch.ones((1,2,128,128)) + x_loc = x.clone() + # x = F.relu(F.interpolate(self.decoder5(x) , scale_factor=(2,2), mode ='bilinear')) + #start + for i in range(0,4): + for j in range(0,4): + + x_p = xin[:,:,32*i:32*(i+1),32*j:32*(j+1)] # split into patches: run the local transformer branch on each 32x32 patch in turn + # begin patch wise + x_p = self.conv1_p(x_p) # imgchan -> inplanes, 3 -> 512*s + x_p = self.bn1_p(x_p) + # x = F.max_pool2d(x,2,2) + x_p = self.relu(x_p) + + x_p = self.conv2_p(x_p) # inplanes -> 128 + x_p = self.bn2_p(x_p) + # x = F.max_pool2d(x,2,2) + x_p = self.relu(x_p) + x_p = self.conv3_p(x_p) # 128->inplanes + x_p = self.bn3_p(x_p) + # x = F.max_pool2d(x,2,2) + x_p = self.relu(x_p) + + # x = self.maxpool(x) + # pdb.set_trace() + x1_p = self.layer1_p(x_p) # inplanes ->128*s*2, inplanes: 512*s->128*s*2 + # print(x1.shape) + x2_p = self.layer2_p(x1_p) # 256*s*2 + # print(x2.shape) + x3_p = self.layer3_p(x2_p) # 512*s*2 + # # print(x3.shape) + x4_p = self.layer4_p(x3_p) # 1024*s*2 + + x_p = F.relu(F.interpolate(self.decoder1_p(x4_p), scale_factor=(2,2), mode ='bilinear')) # 1024*s*2 + x_p = torch.add(x_p, x4_p) + x_p = F.relu(F.interpolate(self.decoder2_p(x_p) , scale_factor=(2,2), mode ='bilinear')) # 1024*s + x_p = torch.add(x_p, x3_p) + x_p = F.relu(F.interpolate(self.decoder3_p(x_p) , scale_factor=(2,2), mode ='bilinear')) # 512*s + x_p = torch.add(x_p, x2_p) + x_p = F.relu(F.interpolate(self.decoder4_p(x_p) , scale_factor=(2,2), mode ='bilinear')) # 256*s + x_p = torch.add(x_p, x1_p) + x_p = F.relu(F.interpolate(self.decoder5_p(x_p) , scale_factor=(2,2), mode ='bilinear')) + + x_loc[:,:,32*i:32*(i+1),32*j:32*(j+1)] = x_p + + x = torch.add(x,x_loc) + x = F.relu(self.decoderf(x)) # 128*s->128*s + + x = self.adjust(F.relu(x)) # 128*s -> classes + + # pdb.set_trace() + return x + + def forward(self, x): + return self._forward_impl(x) + + +def axialunet(pretrained=False, **kwargs): + model = ResAxialAttentionUNet(AxialBlock, [1, 2, 4, 1], s= 0.125, **kwargs) + return model + +def gated(pretrained=False, **kwargs): + model = ResAxialAttentionUNet(AxialBlock_dynamic, [1, 2, 4, 1], s= 0.125, **kwargs) + return model + +def MedT(pretrained=False, **kwargs): + model = medt_net(AxialBlock_dynamic,AxialBlock_wopos, [1, 2, 4, 1], s= 0.125, **kwargs) + return model + +def logo(pretrained=False, **kwargs): + model = medt_net(AxialBlock,AxialBlock, [1, 2, 4, 1], s= 0.125, **kwargs) + return model + +# EOF \ No newline at end of file diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/model_codes.py 
b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/model_codes.py new file mode 100644 index 0000000000000000000000000000000000000000..6e517c234f96f01e7d08acfc4482f82378751437 --- /dev/null +++ b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/model_codes.py @@ -0,0 +1,2324 @@ +import pdb +import math +import torch +import torch.nn as nn +import torch.nn.functional as F +from .utils import * +import pdb +import matplotlib.pyplot as plt + +import random + +__all__ = ['axial26s', 'axial50s', 'axial50m', 'axial50l'] + + +def conv1x1(in_planes, out_planes, stride=1): + """1x1 convolution""" + return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) + + +class AxialAttention(nn.Module): + def __init__(self, in_planes, out_planes, groups=8, kernel_size=56, + stride=1, bias=False, width=False): + assert (in_planes % groups == 0) and (out_planes % groups == 0) + super(AxialAttention, self).__init__() + self.in_planes = in_planes + self.out_planes = out_planes + self.groups = groups + self.group_planes = out_planes // groups + self.kernel_size = kernel_size + self.stride = stride + self.bias = bias + self.width = width + + # Multi-head self attention + self.qkv_transform = qkv_transform(in_planes, out_planes * 2, kernel_size=1, stride=1, + padding=0, bias=False) + self.bn_qkv = nn.BatchNorm1d(out_planes * 2) + self.bn_similarity = nn.BatchNorm2d(groups * 3) + #self.bn_qk = nn.BatchNorm2d(groups) + #self.bn_qr = nn.BatchNorm2d(groups) + #self.bn_kr = nn.BatchNorm2d(groups) + self.bn_output = nn.BatchNorm1d(out_planes * 2) + + # Position embedding + self.relative = nn.Parameter(torch.randn(self.group_planes * 2, kernel_size * 2 - 1), requires_grad=True) + query_index = torch.arange(kernel_size).unsqueeze(0) + key_index = torch.arange(kernel_size).unsqueeze(1) + relative_index = key_index - query_index + kernel_size - 1 + self.register_buffer('flatten_index', relative_index.view(-1)) + if stride > 1: + self.pooling = nn.AvgPool2d(stride, stride=stride) + + self.reset_parameters() + + def forward(self, x): + # pdb.set_trace() + if self.width: + x = x.permute(0, 2, 1, 3) + else: + x = x.permute(0, 3, 1, 2) # N, W, C, H + N, W, C, H = x.shape + x = x.contiguous().view(N * W, C, H) + + # Transformations + qkv = self.bn_qkv(self.qkv_transform(x)) + q, k, v = torch.split(qkv.reshape(N * W, self.groups, self.group_planes * 2, H), [self.group_planes // 2, self.group_planes // 2, self.group_planes], dim=2) + + # Calculate position embedding + all_embeddings = torch.index_select(self.relative, 1, self.flatten_index).view(self.group_planes * 2, self.kernel_size, self.kernel_size) + q_embedding, k_embedding, v_embedding = torch.split(all_embeddings, [self.group_planes // 2, self.group_planes // 2, self.group_planes], dim=0) + + rd = random.randint(0,100) + qqn = q_embedding[0].detach().cpu().numpy() + plt.imshow(qqn) + plt.savefig("glas/q/%d.png"%rd) + + kqn = k_embedding[0].detach().cpu().numpy() + plt.imshow(kqn) + plt.savefig("glas/k/%d.png"%rd) + + vqn = v_embedding[0].detach().cpu().numpy() + plt.imshow(vqn) + plt.savefig("glas/v/%d.png"%rd) + + qr = torch.einsum('bgci,cij->bgij', q, q_embedding) + kr = torch.einsum('bgci,cij->bgij', k, k_embedding).transpose(2, 3) + + qk = torch.einsum('bgci, bgcj->bgij', q, k) + # print(qk.shape, qr.shape, kr.shape) + # import pdb + # pdb.set_trace() + stacked_similarity = torch.cat([qk, qr, kr], dim=1) + stacked_similarity = 
self.bn_similarity(stacked_similarity).view(N * W, 3, self.groups, H, H).sum(dim=1) + #stacked_similarity = self.bn_qr(qr) + self.bn_kr(kr) + self.bn_qk(qk) + # (N, groups, H, H, W) + similarity = F.softmax(stacked_similarity, dim=3) + sv = torch.einsum('bgij,bgcj->bgci', similarity, v) + sve = torch.einsum('bgij,cij->bgci', similarity, v_embedding) + stacked_output = torch.cat([sv, sve], dim=-1).view(N * W, self.out_planes * 2, H) + output = self.bn_output(stacked_output).view(N, W, self.out_planes, 2, H).sum(dim=-2) + + if self.width: + output = output.permute(0, 2, 1, 3) + else: + output = output.permute(0, 2, 3, 1) + + if self.stride > 1: + output = self.pooling(output) + + return output + + def reset_parameters(self): + self.qkv_transform.weight.data.normal_(0, math.sqrt(1. / self.in_planes)) + #nn.init.uniform_(self.relative, -0.1, 0.1) + nn.init.normal_(self.relative, 0., math.sqrt(1. / self.group_planes)) + +class AxialAttention_dynamic(nn.Module): + def __init__(self, in_planes, out_planes, groups=8, kernel_size=56, + stride=1, bias=False, width=False): + assert (in_planes % groups == 0) and (out_planes % groups == 0) + super(AxialAttention_dynamic, self).__init__() + self.in_planes = in_planes + self.out_planes = out_planes + self.groups = groups + self.group_planes = out_planes // groups + self.kernel_size = kernel_size + self.stride = stride + self.bias = bias + self.width = width + + # Multi-head self attention + self.qkv_transform = qkv_transform(in_planes, out_planes * 2, kernel_size=1, stride=1, + padding=0, bias=False) + self.bn_qkv = nn.BatchNorm1d(out_planes * 2) + self.bn_similarity = nn.BatchNorm2d(groups * 3) + #self.bn_qk = nn.BatchNorm2d(groups) + #self.bn_qr = nn.BatchNorm2d(groups) + #self.bn_kr = nn.BatchNorm2d(groups) + self.bn_output = nn.BatchNorm1d(out_planes * 2) + + # Priority on encoding + + self.f_qr = nn.Parameter(torch.tensor(1.0), requires_grad=False) + self.f_kr = nn.Parameter(torch.tensor(1.0), requires_grad=False) + self.f_sve = nn.Parameter(torch.tensor(1.0), requires_grad=False) + self.f_sv = nn.Parameter(torch.tensor(1.0), requires_grad=False) + + # Position embedding + self.relative = nn.Parameter(torch.randn(self.group_planes * 2, kernel_size * 2 - 1), requires_grad=True) + query_index = torch.arange(kernel_size).unsqueeze(0) + key_index = torch.arange(kernel_size).unsqueeze(1) + relative_index = key_index - query_index + kernel_size - 1 + self.register_buffer('flatten_index', relative_index.view(-1)) + if stride > 1: + self.pooling = nn.AvgPool2d(stride, stride=stride) + + self.reset_parameters() + # self.print_para() + + def forward(self, x): + if self.width: + x = x.permute(0, 2, 1, 3) + else: + x = x.permute(0, 3, 1, 2) # N, W, C, H + N, W, C, H = x.shape + x = x.contiguous().view(N * W, C, H) + + # Transformations + qkv = self.bn_qkv(self.qkv_transform(x)) + q, k, v = torch.split(qkv.reshape(N * W, self.groups, self.group_planes * 2, H), [self.group_planes // 2, self.group_planes // 2, self.group_planes], dim=2) + + # Calculate position embedding + all_embeddings = torch.index_select(self.relative, 1, self.flatten_index).view(self.group_planes * 2, self.kernel_size, self.kernel_size) + q_embedding, k_embedding, v_embedding = torch.split(all_embeddings, [self.group_planes // 2, self.group_planes // 2, self.group_planes], dim=0) + qr = torch.einsum('bgci,cij->bgij', q, q_embedding) + kr = torch.einsum('bgci,cij->bgij', k, k_embedding).transpose(2, 3) + qk = torch.einsum('bgci, bgcj->bgij', q, k) + # print(qk.shape, qr.shape, kr.shape) + # 
import pdb + # pdb.set_trace() + + # multiply by factors + qr = torch.mul(qr, self.f_qr) + kr = torch.mul(kr, self.f_kr) + + stacked_similarity = torch.cat([qk, qr, kr], dim=1) + stacked_similarity = self.bn_similarity(stacked_similarity).view(N * W, 3, self.groups, H, H).sum(dim=1) + #stacked_similarity = self.bn_qr(qr) + self.bn_kr(kr) + self.bn_qk(qk) + # (N, groups, H, H, W) + similarity = F.softmax(stacked_similarity, dim=3) + sv = torch.einsum('bgij,bgcj->bgci', similarity, v) + sve = torch.einsum('bgij,cij->bgci', similarity, v_embedding) + + # multiply by factors + sv = torch.mul(sv, self.f_sv) + sve = torch.mul(sve, self.f_sve) + + stacked_output = torch.cat([sv, sve], dim=-1).view(N * W, self.out_planes * 2, H) + output = self.bn_output(stacked_output).view(N, W, self.out_planes, 2, H).sum(dim=-2) + + if self.width: + output = output.permute(0, 2, 1, 3) + else: + output = output.permute(0, 2, 3, 1) + + if self.stride > 1: + output = self.pooling(output) + + return output + def reset_parameters(self): + self.qkv_transform.weight.data.normal_(0, math.sqrt(1. / self.in_planes)) + #nn.init.uniform_(self.relative, -0.1, 0.1) + nn.init.normal_(self.relative, 0., math.sqrt(1. / self.group_planes)) + +class AxialAttention_gated_sig(nn.Module): + def __init__(self, in_planes, out_planes, groups=8, kernel_size=56, + stride=1, bias=False, width=False): + assert (in_planes % groups == 0) and (out_planes % groups == 0) + super(AxialAttention_gated_sig, self).__init__() + self.in_planes = in_planes + self.out_planes = out_planes + self.groups = groups + self.group_planes = out_planes // groups + self.kernel_size = kernel_size + self.stride = stride + self.bias = bias + self.width = width + + # Multi-head self attention + self.qkv_transform = qkv_transform(in_planes, out_planes * 2, kernel_size=1, stride=1, + padding=0, bias=False) + self.bn_qkv = nn.BatchNorm1d(out_planes * 2) + self.bn_similarity = nn.BatchNorm2d(groups * 3) + #self.bn_qk = nn.BatchNorm2d(groups) + #self.bn_qr = nn.BatchNorm2d(groups) + #self.bn_kr = nn.BatchNorm2d(groups) + self.bn_output = nn.BatchNorm1d(out_planes * 2) + + # Priority on encoding + + self.f_qr = nn.Parameter(torch.tensor(0.1), requires_grad=False) + self.f_kr = nn.Parameter(torch.tensor(0.1), requires_grad=False) + self.f_sve = nn.Parameter(torch.tensor(0.1), requires_grad=False) + self.f_sv = nn.Parameter(torch.tensor(5.0), requires_grad=False) + + # Position embedding + self.relative = nn.Parameter(torch.randn(self.group_planes * 2, kernel_size * 2 - 1), requires_grad=True) + query_index = torch.arange(kernel_size).unsqueeze(0) + key_index = torch.arange(kernel_size).unsqueeze(1) + relative_index = key_index - query_index + kernel_size - 1 + self.register_buffer('flatten_index', relative_index.view(-1)) + if stride > 1: + self.pooling = nn.AvgPool2d(stride, stride=stride) + + self.reset_parameters() + # self.print_para() + + def forward(self, x): + if self.width: + x = x.permute(0, 2, 1, 3) + else: + x = x.permute(0, 3, 1, 2) # N, W, C, H + N, W, C, H = x.shape + x = x.contiguous().view(N * W, C, H) + + # Transformations + qkv = self.bn_qkv(self.qkv_transform(x)) + q, k, v = torch.split(qkv.reshape(N * W, self.groups, self.group_planes * 2, H), [self.group_planes // 2, self.group_planes // 2, self.group_planes], dim=2) + + # Calculate position embedding + all_embeddings = torch.index_select(self.relative, 1, self.flatten_index).view(self.group_planes * 2, self.kernel_size, self.kernel_size) + q_embedding, k_embedding, v_embedding = 
torch.split(all_embeddings, [self.group_planes // 2, self.group_planes // 2, self.group_planes], dim=0) + qr = torch.einsum('bgci,cij->bgij', q, q_embedding) + kr = torch.einsum('bgci,cij->bgij', k, k_embedding).transpose(2, 3) + qk = torch.einsum('bgci, bgcj->bgij', q, k) + # print(qk.shape, qr.shape, kr.shape) + # import pdb + # pdb.set_trace() + + # multiply by factors + qr = torch.mul(qr, torch.sigmoid(self.f_qr)) + kr = torch.mul(kr, torch.sigmoid(self.f_kr)) + + stacked_similarity = torch.cat([qk, qr, kr], dim=1) + stacked_similarity = self.bn_similarity(stacked_similarity).view(N * W, 3, self.groups, H, H).sum(dim=1) + #stacked_similarity = self.bn_qr(qr) + self.bn_kr(kr) + self.bn_qk(qk) + # (N, groups, H, H, W) + similarity = F.softmax(stacked_similarity, dim=3) + sv = torch.einsum('bgij,bgcj->bgci', similarity, v) + sve = torch.einsum('bgij,cij->bgci', similarity, v_embedding) + + # multiply by factors + sv = torch.mul(sv, torch.sigmoid(self.f_sv)) + sve = torch.mul(sve, torch.sigmoid(self.f_sve)) + + stacked_output = torch.cat([sv, sve], dim=-1).view(N * W, self.out_planes * 2, H) + output = self.bn_output(stacked_output).view(N, W, self.out_planes, 2, H).sum(dim=-2) + + if self.width: + output = output.permute(0, 2, 1, 3) + else: + output = output.permute(0, 2, 3, 1) + + if self.stride > 1: + output = self.pooling(output) + + return output + + def print_para(self): + print(self.f_qr) + def reset_parameters(self): + self.qkv_transform.weight.data.normal_(0, math.sqrt(1. / self.in_planes)) + #nn.init.uniform_(self.relative, -0.1, 0.1) + nn.init.normal_(self.relative, 0., math.sqrt(1. / self.group_planes)) + +class AxialAttention_gated_data(nn.Module): + def __init__(self, in_planes, out_planes, groups=8, kernel_size=56, + stride=1, bias=False, width=False): + assert (in_planes % groups == 0) and (out_planes % groups == 0) + super(AxialAttention_gated_data, self).__init__() + self.in_planes = in_planes + self.out_planes = out_planes + self.groups = groups + self.group_planes = out_planes // groups + self.kernel_size = kernel_size + self.stride = stride + self.bias = bias + self.width = width + + # Multi-head self attention + self.qkv_transform = qkv_transform(in_planes, out_planes * 2, kernel_size=1, stride=1, + padding=0, bias=False) + self.bn_qkv = nn.BatchNorm1d(out_planes * 2) + self.bn_similarity = nn.BatchNorm2d(groups * 3) + #self.bn_qk = nn.BatchNorm2d(groups) + #self.bn_qr = nn.BatchNorm2d(groups) + #self.bn_kr = nn.BatchNorm2d(groups) + self.bn_output = nn.BatchNorm1d(out_planes * 2) + + # Priority on encoding + + # self.f_qr = nn.Parameter(torch.tensor(0.1), requires_grad=False) + # self.f_kr = nn.Parameter(torch.tensor(0.1), requires_grad=False) + # self.f_sve = nn.Parameter(torch.tensor(0.1), requires_grad=False) + # self.f_sv = nn.Parameter(torch.tensor(1.0), requires_grad=False) + + self.fcn1 = nn.Linear(in_planes, in_planes) + self.fcn2 = nn.Linear(in_planes, 4) + self.pool = nn.AdaptiveAvgPool2d((1,1)) + + # Position embedding + self.relative = nn.Parameter(torch.randn(self.group_planes * 2, kernel_size * 2 - 1), requires_grad=True) + query_index = torch.arange(kernel_size).unsqueeze(0) + key_index = torch.arange(kernel_size).unsqueeze(1) + relative_index = key_index - query_index + kernel_size - 1 + self.register_buffer('flatten_index', relative_index.view(-1)) + if stride > 1: + self.pooling = nn.AvgPool2d(stride, stride=stride) + + self.reset_parameters() + # self.print_para() + + def forward(self, x): + + if self.width: + x = x.permute(0, 2, 1, 3) + else: + x 
= x.permute(0, 3, 1, 2) # N, W, C, H + N, W, C, H = x.shape + x = x.contiguous().view(N * W, C, H) + + # import pdb + # pdb.set_trace() + xn = self.pool(x.unsqueeze(3)) + xn = F.relu(self.fcn1(xn.squeeze(2).squeeze(2))) + xn = F.relu(self.fcn2(xn)) + + sig = F.sigmoid(xn) + + sig1 = sig[:,0] + sig2 = sig[:,1] + sig3 = sig[:,2] + sig4 = sig[:,3] + + + # Transformations + # import pdb + # pdb.set_trace() + qkv = self.bn_qkv(self.qkv_transform(x)) + q, k, v = torch.split(qkv.reshape(N * W, self.groups, self.group_planes * 2, H), [self.group_planes // 2, self.group_planes // 2, self.group_planes], dim=2) + + # Calculate position embedding + all_embeddings = torch.index_select(self.relative, 1, self.flatten_index).view(self.group_planes * 2, self.kernel_size, self.kernel_size) + q_embedding, k_embedding, v_embedding = torch.split(all_embeddings, [self.group_planes // 2, self.group_planes // 2, self.group_planes], dim=0) + qr = torch.einsum('bgci,cij->bgij', q, q_embedding) + kr = torch.einsum('bgci,cij->bgij', k, k_embedding).transpose(2, 3) + qk = torch.einsum('bgci, bgcj->bgij', q, k) + # print(qk.shape, qr.shape, kr.shape) + # import pdb + # pdb.set_trace() + + # multiply by factors + # print(x.shape, qr.shape) + # import pdb + # pdb.set_trace() + qr = sig1.reshape(-1, 1, 1, 1).contiguous()*qr + kr = sig2.reshape(-1, 1, 1, 1).contiguous()*kr + # kr = torch.mul(kr, torch.sigmoid(self.f_kr)) + + stacked_similarity = torch.cat([qk, qr, kr], dim=1) + stacked_similarity = self.bn_similarity(stacked_similarity).view(N * W, 3, self.groups, H, H).sum(dim=1) + #stacked_similarity = self.bn_qr(qr) + self.bn_kr(kr) + self.bn_qk(qk) + # (N, groups, H, H, W) + similarity = F.softmax(stacked_similarity, dim=3) + + + sv = torch.einsum('bgij,bgcj->bgci', similarity, v) + sve = torch.einsum('bgij,cij->bgci', similarity, v_embedding) + + # multiply by factors + sv = sig3.reshape(-1, 1, 1, 1).contiguous()*sv + sve = sig4.reshape(-1, 1, 1, 1).contiguous()*sve + # sv = torch.mul(sv, torch.sigmoid(self.f_sv)) + # sve = torch.mul(sve, torch.sigmoid(self.f_sve)) + + stacked_output = torch.cat([sv, sve], dim=-1).view(N * W, self.out_planes * 2, H) + output = self.bn_output(stacked_output).view(N, W, self.out_planes, 2, H).sum(dim=-2) + + if self.width: + output = output.permute(0, 2, 1, 3) + else: + output = output.permute(0, 2, 3, 1) + + if self.stride > 1: + output = self.pooling(output) + + return output + + def print_para(self): + print(self.f_qr) + def reset_parameters(self): + self.qkv_transform.weight.data.normal_(0, math.sqrt(1. / self.in_planes)) + #nn.init.uniform_(self.relative, -0.1, 0.1) + nn.init.normal_(self.relative, 0., math.sqrt(1. 
/ self.group_planes)) + +class AxialAttention_wopos(nn.Module): + def __init__(self, in_planes, out_planes, groups=8, kernel_size=56, + stride=1, bias=False, width=False): + assert (in_planes % groups == 0) and (out_planes % groups == 0) + super(AxialAttention_wopos, self).__init__() + self.in_planes = in_planes + self.out_planes = out_planes + self.groups = groups + self.group_planes = out_planes // groups + self.kernel_size = kernel_size + self.stride = stride + self.bias = bias + self.width = width + + # Multi-head self attention + self.qkv_transform = qkv_transform(in_planes, out_planes * 2, kernel_size=1, stride=1, + padding=0, bias=False) + self.bn_qkv = nn.BatchNorm1d(out_planes * 2) + self.bn_similarity = nn.BatchNorm2d(groups ) + #self.bn_qk = nn.BatchNorm2d(groups) + #self.bn_qr = nn.BatchNorm2d(groups) + #self.bn_kr = nn.BatchNorm2d(groups) + self.bn_output = nn.BatchNorm1d(out_planes * 1) + + # Position embedding + # self.relative = nn.Parameter(torch.randn(self.group_planes * 2, kernel_size * 2 - 1), requires_grad=True) + # query_index = torch.arange(kernel_size).unsqueeze(0) + # key_index = torch.arange(kernel_size).unsqueeze(1) + # relative_index = key_index - query_index + kernel_size - 1 + # self.register_buffer('flatten_index', relative_index.view(-1)) + if stride > 1: + self.pooling = nn.AvgPool2d(stride, stride=stride) + + self.reset_parameters() + + def forward(self, x): + if self.width: + x = x.permute(0, 2, 1, 3) + else: + x = x.permute(0, 3, 1, 2) # N, W, C, H + N, W, C, H = x.shape + x = x.contiguous().view(N * W, C, H) + + # Transformations + qkv = self.bn_qkv(self.qkv_transform(x)) + q, k, v = torch.split(qkv.reshape(N * W, self.groups, self.group_planes * 2, H), [self.group_planes // 2, self.group_planes // 2, self.group_planes], dim=2) + + # Calculate position embedding + # all_embeddings = torch.index_select(self.relative, 1, self.flatten_index).view(self.group_planes * 2, self.kernel_size, self.kernel_size) + # q_embedding, k_embedding, v_embedding = torch.split(all_embeddings, [self.group_planes // 2, self.group_planes // 2, self.group_planes], dim=0) + # qr = torch.einsum('bgci,cij->bgij', q, q_embedding) + # kr = torch.einsum('bgci,cij->bgij', k, k_embedding).transpose(2, 3) + qk = torch.einsum('bgci, bgcj->bgij', q, k) + # qr = q + # kr = k.transpose(2, 3) + # # print(qk.shape, qr.shape, kr.shape) + # stacked_similarity = torch.cat([qk, qk, qk], dim=1) + stacked_similarity = self.bn_similarity(qk).reshape(N * W, 1, self.groups, H, H).sum(dim=1).contiguous() + #stacked_similarity = self.bn_qr(qr) + self.bn_kr(kr) + self.bn_qk(qk) + # (N, groups, H, H, W) + # import pdb + # pdb.set_trace() + similarity = F.softmax(stacked_similarity, dim=3) + sv = torch.einsum('bgij,bgcj->bgci', similarity, v) + # sve = torch.einsum('bgij,bgcj->bgci', similarity, v) + # stacked_output = torch.cat([sv, sve], dim=-1).view(N * W, self.out_planes * 2, H) + # import pdb + # pdb.set_trace() + sv = sv.reshape(N*W,self.out_planes * 1, H).contiguous() + output = self.bn_output(sv).reshape(N, W, self.out_planes, 1, H).sum(dim=-2).contiguous() + + + if self.width: + output = output.permute(0, 2, 1, 3) + else: + output = output.permute(0, 2, 3, 1) + + if self.stride > 1: + output = self.pooling(output) + + return output + + def reset_parameters(self): + self.qkv_transform.weight.data.normal_(0, math.sqrt(1. / self.in_planes)) + #nn.init.uniform_(self.relative, -0.1, 0.1) + # nn.init.normal_(self.relative, 0., math.sqrt(1. 
/ self.group_planes)) + +#end of attn definition + +class AxialBlock(nn.Module): + expansion = 2 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None, kernel_size=56): + super(AxialBlock, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + width = int(planes * (base_width / 64.)) + # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv_down = conv1x1(inplanes, width) + self.bn1 = norm_layer(width) + self.hight_block = AxialAttention(width, width, groups=groups, kernel_size=kernel_size) + self.width_block = AxialAttention(width, width, groups=groups, kernel_size=kernel_size, stride=stride, width=True) + self.conv_up = conv1x1(width, planes * self.expansion) + self.bn2 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv_down(x) + out = self.bn1(out) + out = self.relu(out) + # print(out.shape) + out = self.hight_block(out) + out = self.width_block(out) + out = self.relu(out) + + out = self.conv_up(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + +class AxialBlock_dynamic(nn.Module): + expansion = 2 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None, kernel_size=56): + super(AxialBlock_dynamic, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + width = int(planes * (base_width / 64.)) + # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv_down = conv1x1(inplanes, width) + self.bn1 = norm_layer(width) + self.hight_block = AxialAttention_dynamic(width, width, groups=groups, kernel_size=kernel_size) + self.width_block = AxialAttention_dynamic(width, width, groups=groups, kernel_size=kernel_size, stride=stride, width=True) + self.conv_up = conv1x1(width, planes * self.expansion) + self.bn2 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv_down(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.hight_block(out) + out = self.width_block(out) + out = self.relu(out) + + out = self.conv_up(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + +class AxialBlock_gated_data(nn.Module): + expansion = 2 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None, kernel_size=56): + super(AxialBlock_gated_data, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + width = int(planes * (base_width / 64.)) + # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv_down = conv1x1(inplanes, width) + self.bn1 = norm_layer(width) + self.hight_block = AxialAttention_gated_data(width, width, groups=groups, kernel_size=kernel_size) + self.width_block = AxialAttention_gated_data(width, width, groups=groups, kernel_size=kernel_size, stride=stride, width=True) + self.conv_up = conv1x1(width, planes * self.expansion) + self.bn2 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride 
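+    # Residual layout identical to AxialBlock; the height/width attention here is
+    # AxialAttention_gated_data, whose four gate factors are predicted from the input itself.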
+ + def forward(self, x): + identity = x + + out = self.conv_down(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.hight_block(out) + out = self.width_block(out) + out = self.relu(out) + + out = self.conv_up(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + +class AxialBlockmod(nn.Module): + expansion = 2 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None, kernel_size=56): + super(AxialBlockmod, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + # print(kernel_size) + width = int(planes * (base_width / 64.)) + # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv_down = conv1x1(inplanes, width) + self.conv1 = nn.Conv2d(width, width, kernel_size = 1) + self.bn1 = norm_layer(width) + self.hight_block = AxialAttention(width, width, groups=groups, kernel_size=kernel_size) + self.width_block = AxialAttention(width, width, groups=groups, kernel_size=kernel_size, stride=stride, width=True) + self.conv_up = conv1x1(width, planes * self.expansion) + self.bn2 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + # pdb.set_trace() + + out = self.conv_down(x) + out = self.bn1(out) + out = self.relu(out) + # print(out.shape) + # out = self.hight_block(out) + # out = self.width_block(out) + # print(self.stride) + out = self.conv1(out) + if self.stride == 2: + out = F.max_pool2d(out,2,2) + if self.downsample is not None: + identity = self.downsample(x) + # out = F.max_pool2d(out,2,2) + # out = self.conv1(out) + + # print(out.shape) + # out = + out = self.relu(out) + + out = self.conv_up(out) + out = self.bn2(out) + + # if self.downsample is not None: + # identity = self.downsample(x) + + # out += identity + out = self.relu(out) + + return out + +class AxialBlock_wopos(nn.Module): + expansion = 2 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None, kernel_size=56): + super(AxialBlock_wopos, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + # print(kernel_size) + width = int(planes * (base_width / 64.)) + # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv_down = conv1x1(inplanes, width) + self.conv1 = nn.Conv2d(width, width, kernel_size = 1) + self.bn1 = norm_layer(width) + self.hight_block = AxialAttention_wopos(width, width, groups=groups, kernel_size=kernel_size) + self.width_block = AxialAttention_wopos(width, width, groups=groups, kernel_size=kernel_size, stride=stride, width=True) + self.conv_up = conv1x1(width, planes * self.expansion) + self.bn2 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + # pdb.set_trace() + + out = self.conv_down(x) + out = self.bn1(out) + out = self.relu(out) + # print(out.shape) + out = self.hight_block(out) + out = self.width_block(out) + # print(self.stride) + # out = self.conv1(out) + # if self.stride == 2: + # out = F.max_pool2d(out,2,2) + # if self.downsample is not None: + # identity = self.downsample(x) + # # out = F.max_pool2d(out,2,2) + # # out = self.conv1(out) + + # # print(out.shape) + # # out = + out = self.relu(out) + + out = 
self.conv_up(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + +class AxialBlockmod_wopos(nn.Module): + expansion = 2 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None, kernel_size=56): + super(AxialBlockmod_wopos, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + # print(kernel_size) + width = int(planes * (base_width / 64.)) + # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv_down = conv1x1(inplanes, width) + self.conv1 = nn.Conv2d(width, width, kernel_size = 1) + self.bn1 = norm_layer(width) + self.hight_block = AxialAttention_wopos(width, width, groups=groups, kernel_size=kernel_size) + self.width_block = AxialAttention_wopos(width, width, groups=groups, kernel_size=kernel_size, stride=stride, width=True) + self.conv_up = conv1x1(width, planes * self.expansion) + self.bn2 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + # pdb.set_trace() + + out = self.conv_down(x) + out = self.bn1(out) + out = self.relu(out) + # print(out.shape) + # out = self.hight_block(out) + # out = self.width_block(out) + # print(self.stride) + out = self.conv1(out) + if self.stride == 2: + out = F.max_pool2d(out,2,2) + if self.downsample is not None: + identity = self.downsample(x) + # out = F.max_pool2d(out,2,2) + # out = self.conv1(out) + + # print(out.shape) + # out = + out = self.relu(out) + + out = self.conv_up(out) + out = self.bn2(out) + + # if self.downsample is not None: + # identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + +#end of block definition + +class AxialAttentionNet(nn.Module): + + def __init__(self, block, layers, num_classes=1000, zero_init_residual=True, + groups=8, width_per_group=64, replace_stride_with_dilation=None, + norm_layer=None, s=0.5): + super(AxialAttentionNet, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + self._norm_layer = norm_layer + + self.inplanes = int(64 * s) + self.dilation = 1 + if replace_stride_with_dilation is None: + # each element in the tuple indicates if we should replace + # the 2x2 stride with a dilated convolution instead + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + self.groups = groups + self.base_width = width_per_group + self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + + + self.bn1 = norm_layer(self.inplanes) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, int(128 * s), layers[0], kernel_size=56) + self.layer2 = self._make_layer(block, int(256 * s), layers[1], stride=2, kernel_size=56, + dilate=replace_stride_with_dilation[0]) + self.layer3 = self._make_layer(block, int(512 * s), layers[2], stride=2, kernel_size=28, + dilate=replace_stride_with_dilation[1]) + self.layer4 = self._make_layer(block, int(1024 * s), layers[3], stride=2, kernel_size=14, + dilate=replace_stride_with_dilation[2]) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = nn.Linear(int(1024 * block.expansion * s), num_classes) 
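+        # Weight init below: Kaiming-normal for conv layers (qkv_transform is skipped,
+        # since AxialAttention.reset_parameters already initializes it), ones/zeros for norm layers.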
+ + for m in self.modules(): + if isinstance(m, (nn.Conv2d, nn.Conv1d)): + if isinstance(m, qkv_transform): + pass + else: + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + # Zero-initialize the last BN in each residual branch, + # so that the residual branch starts with zeros, and each residual block behaves like an identity. + # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 + if zero_init_residual: + for m in self.modules(): + if isinstance(m, AxialBlock): + nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, kernel_size=56, stride=1, dilate=False): + norm_layer = self._norm_layer + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample, groups=self.groups, + base_width=self.base_width, dilation=previous_dilation, + norm_layer=norm_layer, kernel_size=kernel_size)) + self.inplanes = planes * block.expansion + if stride != 1: + kernel_size = kernel_size // 2 + + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, groups=self.groups, + base_width=self.base_width, dilation=self.dilation, + norm_layer=norm_layer, kernel_size=kernel_size)) + + return nn.Sequential(*layers) + + def _forward_impl(self, x): + # See note [TorchScript super()] + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.avgpool(x) + x = torch.flatten(x, 1) + x = self.fc(x) + + return x + + def forward(self, x): + return self._forward_impl(x) + +class ResAxialAttentionUNet(nn.Module): + + def __init__(self, block, layers, num_classes=2, zero_init_residual=True, + groups=8, width_per_group=64, replace_stride_with_dilation=None, + norm_layer=None, s=0.125, img_size = 128,imgchan = 3): + super(ResAxialAttentionUNet, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + self._norm_layer = norm_layer + + self.inplanes = int(64 * s) + self.dilation = 1 + if replace_stride_with_dilation is None: + # each element in the tuple indicates if we should replace + # the 2x2 stride with a dilated convolution instead + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + self.groups = groups + self.base_width = width_per_group + self.conv1 = nn.Conv2d(imgchan, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + # self.conv2 = nn.Conv2d(self.inplanes, 128, kernel_size=3, stride=1, padding=1, bias=False) + # self.conv3 = nn.Conv2d(128, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1 = norm_layer(self.inplanes) + # self.bn2 = norm_layer(128) + # self.bn3 = norm_layer(self.inplanes) + self.relu = nn.ReLU(inplace=True) + # self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, int(128 * s), layers[0], kernel_size= (img_size//2)) + self.layer2 = self._make_layer(block, 
int(256 * s), layers[1], stride=2, kernel_size=(img_size//2), + dilate=replace_stride_with_dilation[0]) + self.layer3 = self._make_layer(block, int(512 * s), layers[2], stride=2, kernel_size=(img_size//4), + dilate=replace_stride_with_dilation[1]) + self.layer4 = self._make_layer(block, int(1024 * s), layers[3], stride=2, kernel_size=(img_size//8), + dilate=replace_stride_with_dilation[2]) + + # self.layer1 = nn.Conv2d(8,32,kernel_size=3, stride=1, padding=1) + # self.layer2 = nn.Conv2d(32,64,kernel_size=3, stride=1, padding=1) + # dilate=replace_stride_with_dilation[0]) + # self.layer3 = self._make_layer(block, int(512 * s), layers[2], stride=2, kernel_size=(img_size//4), + # dilate=replace_stride_with_dilation[1]) + # self.layer4 = self._make_layer(block, int(1024 * s), layers[3], stride=2, kernel_size=(img_size//8), + # dilate=replace_stride_with_dilation[2]) + + # Decoder + self.decoder1 = nn.Conv2d(int(1024 *2*s) , int(1024*2*s), kernel_size=3, stride=2, padding=1) + self.decoder2 = nn.Conv2d(int(1024 *2*s) , int(1024*s), kernel_size=3, stride=1, padding=1) + self.decoder3 = nn.Conv2d(int(1024*s), int(512*s), kernel_size=3, stride=1, padding=1) + self.decoder4 = nn.Conv2d(int(512*s) , int(256*s), kernel_size=3, stride=1, padding=1) + self.decoder5 = nn.Conv2d(int(256*s) , int(128*s) , kernel_size=3, stride=1, padding=1) + self.adjust = nn.Conv2d(int(128*s) , num_classes, kernel_size=1, stride=1, padding=0) + self.soft = nn.Softmax(dim=1) + + + # self.conv1_1 = nn.Conv2d(32,8,kernel_size=1, stride=1, padding=0) + # self.conv1_2 = nn.Conv2d(64,8,kernel_size=1, stride=1, padding=0) + # for m in self.modules(): + # if isinstance(m, (nn.Conv2d, nn.Conv1d)): + # if isinstance(m, qkv_transform): + # pass + # else: + # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + # elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d, nn.GroupNorm)): + # nn.init.constant_(m.weight, 1) + # nn.init.constant_(m.bias, 0) + + # Zero-initialize the last BN in each residual branch, + # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
+ # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 + # if zero_init_residual: + # for m in self.modules(): + # if isinstance(m, AxialBlock): + # nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, kernel_size=56, stride=1, dilate=False): + norm_layer = self._norm_layer + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample, groups=self.groups, + base_width=self.base_width, dilation=previous_dilation, + norm_layer=norm_layer, kernel_size=kernel_size)) + self.inplanes = planes * block.expansion + if stride != 1: + kernel_size = kernel_size // 2 + + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, groups=self.groups, + base_width=self.base_width, dilation=self.dilation, + norm_layer=norm_layer, kernel_size=kernel_size)) + + return nn.Sequential(*layers) + + def _forward_impl(self, x): + # See note [TorchScript super()] + # AxialAttention Encoder + # pdb.set_trace() + x = self.conv1(x) + x = self.bn1(x) + # x = self.relu(x) + # x = self.conv2(x) + # x = self.bn2(x) + # x = self.relu(x) + # x = self.conv3(x) + # x = self.bn3(x) + # x = self.relu(x) + # # x = F.max_pool2d(x,2,2) + x = self.relu(x) + + # x = self.maxpool(x) + # pdb.set_trace() + # print(x.shape) + x1 = self.layer1(x) + # x1 = F.relu(F.max_pool2d(x1,2,2)) + # x1 = self.conv1_1(x1) + # print(x1.shape) + x2 = self.layer2(x1) + # print(x2.shape) + x3 = self.layer3(x2) + # print(x3.shape) + x4 = self.layer4(x3) + # print(x4.shape) + # pdb.set_trace() + # Transposed Convolution Decoder + x = F.relu(F.interpolate(self.decoder1(x4), scale_factor=(2,2), mode ='bilinear')) + # x = torch.add(x, x4) + x = F.relu(F.interpolate(self.decoder2(x) , scale_factor=(2,2), mode ='bilinear')) + # x = torch.add(x, x3) + x = F.relu(F.interpolate(self.decoder3(x) , scale_factor=(2,2), mode ='bilinear')) + # x = torch.add(x, x2) + x = F.relu(F.interpolate(self.decoder4(x) , scale_factor=(2,2), mode ='bilinear')) + # print(x.shape, x1.shape) + # x = torch.add(x, x1) + x = F.relu(F.interpolate(self.decoder5(x) , scale_factor=(2,2), mode ='bilinear')) + x = self.soft(self.adjust(F.relu(x))) + # pdb.set_trace() + return x + + def forward(self, x): + return self._forward_impl(x) + +class unetplus(nn.Module): + + def __init__(self, block, layers, num_classes=2, zero_init_residual=True, + groups=8, width_per_group=64, replace_stride_with_dilation=None, + norm_layer=None, s=0.125, img_size = 128,imgchan = 3): + super(unetplus, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + self._norm_layer = norm_layer + + self.inplanes = int(64 * s) + self.dilation = 1 + if replace_stride_with_dilation is None: + # each element in the tuple indicates if we should replace + # the 2x2 stride with a dilated convolution instead + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + self.groups = groups + self.base_width = width_per_group + self.conv1 = nn.Conv2d(imgchan, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + # self.conv1 = nn.Conv2d(1, 
self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1 = norm_layer(self.inplanes) + self.relu = nn.ReLU(inplace=True) + # self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, int(128 * s), layers[0], kernel_size= (img_size//2)) + self.layer2 = self._make_layer(block, int(256 * s), layers[1], stride=2, kernel_size=(img_size//2), + dilate=replace_stride_with_dilation[0]) + self.layer3 = self._make_layer(block, int(512 * s), layers[2], stride=2, kernel_size=(img_size//4), + dilate=replace_stride_with_dilation[1]) + self.layer4 = self._make_layer(block, int(1024 * s), layers[3], stride=2, kernel_size=(img_size//8), + dilate=replace_stride_with_dilation[2]) + + # Decoder + self.decoder1 = nn.Conv2d(int(1024 *2*s) , int(1024*2*s), kernel_size=3, stride=2, padding=1) + self.decoder2 = nn.Conv2d(int(1024 *2*s) , int(1024*s), kernel_size=3, stride=1, padding=1) + self.decoder3 = nn.Conv2d(int(1024*s), int(512*s), kernel_size=3, stride=1, padding=1) + self.decoder4 = nn.Conv2d(int(512*s) , int(256*s), kernel_size=3, stride=1, padding=1) + self.decoder5 = nn.Conv2d(int(256*s) , int(128*s) , kernel_size=3, stride=1, padding=1) + self.adjust = nn.Conv2d(int(128*s) , num_classes, kernel_size=1, stride=1, padding=0) + self.soft = nn.Softmax(dim=1) + + self.inter1= nn.Conv2d(32, 32, 3, stride=1, padding=1) + self.inter2= nn.Conv2d(64, 64, 3, stride=1, padding=1) + self.inter3= nn.Conv2d(128, 128, 3, stride=1, padding=1) + self.inter4= nn.Conv2d(256, 256, 3, stride=1, padding=1) + # self.inter5= nn.Conv2d(32, 32, 3, stride=1, padding=1) + + self.inte1= nn.Conv2d(32, 2, 1, stride=1, padding=0) + self.inte2= nn.Conv2d(64, 2, 1, stride=1, padding=0) + self.inte3= nn.Conv2d(128, 2, 1, stride=1, padding=0) + self.inte4= nn.Conv2d(256, 2, 1, stride=1, padding=0) + + # for m in self.modules(): + # if isinstance(m, (nn.Conv2d, nn.Conv1d)): + # if isinstance(m, qkv_transform): + # pass + # else: + # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + # elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d, nn.GroupNorm)): + # nn.init.constant_(m.weight, 1) + # nn.init.constant_(m.bias, 0) + + # Zero-initialize the last BN in each residual branch, + # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
+ # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 + # if zero_init_residual: + # for m in self.modules(): + # if isinstance(m, AxialBlock): + # nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, kernel_size=56, stride=1, dilate=False): + norm_layer = self._norm_layer + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample, groups=self.groups, + base_width=self.base_width, dilation=previous_dilation, + norm_layer=norm_layer, kernel_size=kernel_size)) + self.inplanes = planes * block.expansion + if stride != 1: + kernel_size = kernel_size // 2 + + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, groups=self.groups, + base_width=self.base_width, dilation=self.dilation, + norm_layer=norm_layer, kernel_size=kernel_size)) + + return nn.Sequential(*layers) + + def _forward_impl(self, x): + # See note [TorchScript super()] + # AxialAttention Encoder + # pdb.set_trace() + x = self.conv1(x) + x = self.bn1(x) + # x = F.max_pool2d(x,2,2) + x = self.relu(x) + + # x = self.maxpool(x) + # pdb.set_trace() + x1 = self.layer1(x) + # print(x1.shape) + x2 = self.layer2(x1) + # print(x2.shape) + x3 = self.layer3(x2) + # print(x3.shape) + x4 = self.layer4(x3) + # print(x4.shape) + # pdb.set_trace() + # Transposed Convolution Decoder + x = F.relu(F.interpolate(self.decoder1(x4), scale_factor=(2,2), mode ='bilinear')) + x = torch.add(x, x4) + x = F.relu(F.interpolate(self.decoder2(x) , scale_factor=(2,2), mode ='bilinear')) + x = torch.add(x, x3) + x = F.relu(F.interpolate(self.decoder3(x) , scale_factor=(2,2), mode ='bilinear')) + x = torch.add(x, x2) + x = F.relu(F.interpolate(self.decoder4(x) , scale_factor=(2,2), mode ='bilinear')) + x = torch.add(x, x1) + x = F.relu(F.interpolate(self.decoder5(x) , scale_factor=(2,2), mode ='bilinear')) + x = self.soft(self.adjust(F.relu(x))) + # pdb.set_trace() + return x + + def forward(self, x): + return self._forward_impl(x) + +class mix(nn.Module): + + def __init__(self, block, layers, num_classes=2, zero_init_residual=True, + groups=8, width_per_group=64, replace_stride_with_dilation=None, + norm_layer=None, s=0.125, img_size = 128,imgchan = 3): + super(mix, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + self._norm_layer = norm_layer + + self.inplanes = int(64 * s) + self.dilation = 1 + if replace_stride_with_dilation is None: + # each element in the tuple indicates if we should replace + # the 2x2 stride with a dilated convolution instead + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + self.groups = groups + self.base_width = width_per_group + self.conv1 = nn.Conv2d(imgchan, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + # self.conv1 = nn.Conv2d(1, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1 = norm_layer(self.inplanes) + self.relu = nn.ReLU(inplace=True) + # self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, int(128 * s), layers[0], 
kernel_size= (img_size//2)) + self.layer2 = self._make_layer(block, int(256 * s), layers[1], stride=2, kernel_size=(img_size//2), + dilate=replace_stride_with_dilation[0]) + self.layer3 = self._make_layer(block, int(512 * s), layers[2], stride=2, kernel_size=(img_size//4), + dilate=replace_stride_with_dilation[1]) + # self.layer4 = self._make_layer(block, int(1024 * s), layers[3], stride=2, kernel_size=(img_size//8), + # dilate=replace_stride_with_dilation[2]) + + # Decoder + # self.decoder1 = nn.Conv2d(int(1024 *2*s) , int(1024*2*s), kernel_size=3, stride=2, padding=1) + # self.decoder2 = nn.Conv2d(int(1024 *2*s) , int(1024*s), kernel_size=3, stride=1, padding=1) + self.decoder3 = nn.Conv2d(int(1024*s), int(512*s), kernel_size=3, stride=1, padding=1) + self.decoder4 = nn.Conv2d(int(512*s) , int(256*s), kernel_size=3, stride=1, padding=1) + self.decoder5 = nn.Conv2d(int(256*s) , int(128*s) , kernel_size=3, stride=1, padding=1) + self.adjust = nn.Conv2d(int(128*s) , num_classes, kernel_size=1, stride=1, padding=0) + self.soft = nn.Softmax(dim=1) + + + self.conv1_p = nn.Conv2d(imgchan, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + # self.conv1 = nn.Conv2d(1, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1_p = norm_layer(self.inplanes) + self.relu_p = nn.ReLU(inplace=True) + + img_size_p = img_size // 4 + + self.layer1_p = self._make_layer(block, int(128 * s), layers[0], kernel_size= (img_size_p//2)) + self.layer2_p = self._make_layer(block, int(256 * s), layers[1], stride=2, kernel_size=(img_size_p//2), + dilate=replace_stride_with_dilation[0]) + self.layer3_p = self._make_layer(block, int(512 * s), layers[2], stride=2, kernel_size=(img_size_p//4), + dilate=replace_stride_with_dilation[1]) + self.layer4_p = self._make_layer(block, int(1024 * s), layers[3], stride=2, kernel_size=(img_size_p//8), + dilate=replace_stride_with_dilation[2]) + + # Decoder + self.decoder1_p = nn.Conv2d(int(1024 *2*s) , int(1024*2*s), kernel_size=3, stride=2, padding=1) + self.decoder2_p = nn.Conv2d(int(1024 *2*s) , int(1024*s), kernel_size=3, stride=1, padding=1) + self.decoder3_p = nn.Conv2d(int(1024*s), int(512*s), kernel_size=3, stride=1, padding=1) + self.decoder4_p = nn.Conv2d(int(512*s) , int(256*s), kernel_size=3, stride=1, padding=1) + self.decoder5_p = nn.Conv2d(int(256*s) , int(128*s) , kernel_size=3, stride=1, padding=1) + + self.decoderf = nn.Conv2d(int(128*s) , int(128*s) , kernel_size=3, stride=1, padding=1) + self.adjust_p = nn.Conv2d(int(128*s) , num_classes, kernel_size=1, stride=1, padding=0) + self.soft_p = nn.Softmax(dim=1) + + # for m in self.modules(): + # if isinstance(m, (nn.Conv2d, nn.Conv1d)): + # if isinstance(m, qkv_transform): + # pass + # else: + # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + # elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d, nn.GroupNorm)): + # nn.init.constant_(m.weight, 1) + # nn.init.constant_(m.bias, 0) + + # Zero-initialize the last BN in each residual branch, + # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
+ # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 + # if zero_init_residual: + # for m in self.modules(): + # if isinstance(m, AxialBlock): + # nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, kernel_size=56, stride=1, dilate=False): + norm_layer = self._norm_layer + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample, groups=self.groups, + base_width=self.base_width, dilation=previous_dilation, + norm_layer=norm_layer, kernel_size=kernel_size)) + self.inplanes = planes * block.expansion + if stride != 1: + kernel_size = kernel_size // 2 + + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, groups=self.groups, + base_width=self.base_width, dilation=self.dilation, + norm_layer=norm_layer, kernel_size=kernel_size)) + + return nn.Sequential(*layers) + + def _forward_impl(self, x): + # See note [TorchScript super()] + # AxialAttention Encoder + # pdb.set_trace() + xin = x.clone() + x = self.conv1(x) + x = self.bn1(x) + # x = F.max_pool2d(x,2,2) + x = self.relu(x) + + # x = self.maxpool(x) + # pdb.set_trace() + x1 = self.layer1(x) + # print(x1.shape) + x2 = self.layer2(x1) + # print(x2.shape) + x3 = self.layer3(x2) + # # print(x3.shape) + # x4 = self.layer4(x3) + # # print(x4.shape) + # # pdb.set_trace() + # # Transposed Convolution Decoder + # x = F.relu(F.interpolate(self.decoder1(x4), scale_factor=(2,2), mode ='bilinear')) + # x = torch.add(x, x4) + # x = F.relu(F.interpolate(self.decoder2(x4) , scale_factor=(2,2), mode ='bilinear')) + # x = torch.add(x, x3) + x = F.relu(F.interpolate(self.decoder3(x3) , scale_factor=(2,2), mode ='bilinear')) + x = torch.add(x, x2) + x = F.relu(F.interpolate(self.decoder4(x2) , scale_factor=(2,2), mode ='bilinear')) + x = torch.add(x, x1) + x = F.relu(F.interpolate(self.decoder5(x) , scale_factor=(2,2), mode ='bilinear')) + # print(x.shape) + + # end of full image training + + # y_out = torch.ones((1,2,128,128)) + x_loc = x.clone() + # x = F.relu(F.interpolate(self.decoder5(x) , scale_factor=(2,2), mode ='bilinear')) + #start + for i in range(0,4): + for j in range(0,4): + + x_p = xin[:,:,32*i:32*(i+1),32*j:32*(j+1)] + # begin patch wise + x_p = self.conv1_p(x_p) + x_p = self.bn1_p(x_p) + # x = F.max_pool2d(x,2,2) + x_p = self.relu(x_p) + + # x = self.maxpool(x) + # pdb.set_trace() + x1_p = self.layer1_p(x_p) + # print(x1.shape) + x2_p = self.layer2_p(x1_p) + # print(x2.shape) + x3_p = self.layer3_p(x2_p) + # # print(x3.shape) + x4_p = self.layer4_p(x3_p) + # # print(x4.shape) + # # pdb.set_trace() + # # Transposed Convolution Decoder + x_p = F.relu(F.interpolate(self.decoder1_p(x4_p), scale_factor=(2,2), mode ='bilinear')) + x_p = torch.add(x_p, x4_p) + x_p = F.relu(F.interpolate(self.decoder2_p(x_p) , scale_factor=(2,2), mode ='bilinear')) + x_p = torch.add(x_p, x3_p) + x_p = F.relu(F.interpolate(self.decoder3_p(x_p) , scale_factor=(2,2), mode ='bilinear')) + x_p = torch.add(x_p, x2_p) + x_p = F.relu(F.interpolate(self.decoder4_p(x_p) , scale_factor=(2,2), mode ='bilinear')) + x_p = torch.add(x_p, x1_p) + x_p = F.relu(F.interpolate(self.decoder5_p(x_p) , scale_factor=(2,2), mode ='bilinear')) + # x_p = 
self.soft_p(self.adjust_p(F.relu(x_p))) + # print(x_p.shape) + x_loc[:,:,32*i:32*(i+1),32*j:32*(j+1)] = x_p + + x = torch.add(x,x_loc) + x = F.relu(self.decoderf(x)) + + x = self.soft(self.adjust(F.relu(x))) + + # pdb.set_trace() + return x + + def forward(self, x): + return self._forward_impl(x) + +class mix_wopos(nn.Module): + + def __init__(self, block, block_2, layers, num_classes=2, zero_init_residual=True, + groups=8, width_per_group=64, replace_stride_with_dilation=None, + norm_layer=None, s=0.125, img_size = 128,imgchan = 3): + super(mix_wopos, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + self._norm_layer = norm_layer + + self.inplanes = int(64 * s) + self.dilation = 1 + if replace_stride_with_dilation is None: + # each element in the tuple indicates if we should replace + # the 2x2 stride with a dilated convolution instead + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + self.groups = groups + self.base_width = width_per_group + self.conv1 = nn.Conv2d(imgchan, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + self.conv2 = nn.Conv2d(self.inplanes, 128, kernel_size=3, stride=1, padding=1, bias=False) + self.conv3 = nn.Conv2d(128, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1 = norm_layer(self.inplanes) + self.bn2 = norm_layer(128) + self.bn3 = norm_layer(self.inplanes) + # self.conv1 = nn.Conv2d(1, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1 = norm_layer(self.inplanes) + self.relu = nn.ReLU(inplace=True) + # self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, int(128 * s), layers[0], kernel_size= (img_size//2)) + self.layer2 = self._make_layer(block, int(256 * s), layers[1], stride=2, kernel_size=(img_size//2), + dilate=replace_stride_with_dilation[0]) + # self.layer3 = self._make_layer(block, int(512 * s), layers[2], stride=2, kernel_size=(img_size//4), + # dilate=replace_stride_with_dilation[1]) + # self.layer4 = self._make_layer(block, int(1024 * s), layers[3], stride=2, kernel_size=(img_size//8), + # dilate=replace_stride_with_dilation[2]) + + # Decoder + # self.decoder1 = nn.Conv2d(int(1024 *2*s) , int(1024*2*s), kernel_size=3, stride=2, padding=1) + # self.decoder2 = nn.Conv2d(int(1024 *2*s) , int(1024*s), kernel_size=3, stride=1, padding=1) + # self.decoder3 = nn.Conv2d(int(1024*s), int(512*s), kernel_size=3, stride=1, padding=1) + self.decoder4 = nn.Conv2d(int(512*s) , int(256*s), kernel_size=3, stride=1, padding=1) + self.decoder5 = nn.Conv2d(int(256*s) , int(128*s) , kernel_size=3, stride=1, padding=1) + self.adjust = nn.Conv2d(int(128*s) , num_classes, kernel_size=1, stride=1, padding=0) + self.soft = nn.Softmax(dim=1) + + + self.conv1_p = nn.Conv2d(imgchan, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + self.conv2_p = nn.Conv2d(self.inplanes,128, kernel_size=3, stride=1, padding=1, + bias=False) + self.conv3_p = nn.Conv2d(128, self.inplanes, kernel_size=3, stride=1, padding=1, + bias=False) + # self.conv1 = nn.Conv2d(1, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1_p = norm_layer(self.inplanes) + self.bn2_p = norm_layer(128) + self.bn3_p = norm_layer(self.inplanes) + + self.relu_p = nn.ReLU(inplace=True) + + img_size_p = img_size // 4 + + self.layer1_p = self._make_layer(block_2, 
int(128 * s), layers[0], kernel_size= (img_size_p//2)) + self.layer2_p = self._make_layer(block_2, int(256 * s), layers[1], stride=2, kernel_size=(img_size_p//2), + dilate=replace_stride_with_dilation[0]) + self.layer3_p = self._make_layer(block_2, int(512 * s), layers[2], stride=2, kernel_size=(img_size_p//4), + dilate=replace_stride_with_dilation[1]) + self.layer4_p = self._make_layer(block_2, int(1024 * s), layers[3], stride=2, kernel_size=(img_size_p//8), + dilate=replace_stride_with_dilation[2]) + + # Decoder + self.decoder1_p = nn.Conv2d(int(1024 *2*s) , int(1024*2*s), kernel_size=3, stride=2, padding=1) + self.decoder2_p = nn.Conv2d(int(1024 *2*s) , int(1024*s), kernel_size=3, stride=1, padding=1) + self.decoder3_p = nn.Conv2d(int(1024*s), int(512*s), kernel_size=3, stride=1, padding=1) + self.decoder4_p = nn.Conv2d(int(512*s) , int(256*s), kernel_size=3, stride=1, padding=1) + self.decoder5_p = nn.Conv2d(int(256*s) , int(128*s) , kernel_size=3, stride=1, padding=1) + + self.decoderf = nn.Conv2d(int(128*s) , int(128*s) , kernel_size=3, stride=1, padding=1) + self.adjust_p = nn.Conv2d(int(128*s) , num_classes, kernel_size=1, stride=1, padding=0) + self.soft_p = nn.Softmax(dim=1) + + # for m in self.modules(): + # if isinstance(m, (nn.Conv2d, nn.Conv1d)): + # if isinstance(m, qkv_transform): + # pass + # else: + # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + # elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d, nn.GroupNorm)): + # nn.init.constant_(m.weight, 1) + # nn.init.constant_(m.bias, 0) + + # Zero-initialize the last BN in each residual branch, + # so that the residual branch starts with zeros, and each residual block behaves like an identity. + # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 + # if zero_init_residual: + # for m in self.modules(): + # if isinstance(m, AxialBlock): + # nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, kernel_size=56, stride=1, dilate=False): + norm_layer = self._norm_layer + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample, groups=self.groups, + base_width=self.base_width, dilation=previous_dilation, + norm_layer=norm_layer, kernel_size=kernel_size)) + self.inplanes = planes * block.expansion + if stride != 1: + kernel_size = kernel_size // 2 + + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, groups=self.groups, + base_width=self.base_width, dilation=self.dilation, + norm_layer=norm_layer, kernel_size=kernel_size)) + + return nn.Sequential(*layers) + + def _forward_impl(self, x): + # See note [TorchScript super()] + # AxialAttention Encoder + # pdb.set_trace() + xin = x.clone() + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.conv2(x) + x = self.bn2(x) + x = self.relu(x) + x = self.conv3(x) + x = self.bn3(x) + # x = F.max_pool2d(x,2,2) + x = self.relu(x) + + # x = self.maxpool(x) + # pdb.set_trace() + x1 = self.layer1(x) + # print(x1.shape) + x2 = self.layer2(x1) + # print(x2.shape) + # x3 = self.layer3(x2) + # # print(x3.shape) + # x4 = self.layer4(x3) + # # print(x4.shape) + # # pdb.set_trace() + # # Transposed Convolution Decoder + # x = 
F.relu(F.interpolate(self.decoder1(x4), scale_factor=(2,2), mode ='bilinear')) + # x = torch.add(x, x4) + # x = F.relu(F.interpolate(self.decoder2(x4) , scale_factor=(2,2), mode ='bilinear')) + # x = torch.add(x, x3) + # x = F.relu(F.interpolate(self.decoder3(x3) , scale_factor=(2,2), mode ='bilinear')) + # x = torch.add(x, x2) + x = F.relu(F.interpolate(self.decoder4(x2) , scale_factor=(2,2), mode ='bilinear')) + x = torch.add(x, x1) + x = F.relu(F.interpolate(self.decoder5(x) , scale_factor=(2,2), mode ='bilinear')) + # print(x.shape) + + # end of full image training + + # y_out = torch.ones((1,2,128,128)) + x_loc = x.clone() + # x = F.relu(F.interpolate(self.decoder5(x) , scale_factor=(2,2), mode ='bilinear')) + #start + for i in range(0,4): + for j in range(0,4): + + x_p = xin[:,:,32*i:32*(i+1),32*j:32*(j+1)] + # begin patch wise + x_p = self.conv1_p(x_p) + x_p = self.bn1_p(x_p) + # x = F.max_pool2d(x,2,2) + x_p = self.relu(x_p) + + x_p = self.conv2_p(x_p) + x_p = self.bn2_p(x_p) + # x = F.max_pool2d(x,2,2) + x_p = self.relu(x_p) + x_p = self.conv3_p(x_p) + x_p = self.bn3_p(x_p) + # x = F.max_pool2d(x,2,2) + x_p = self.relu(x_p) + + # x = self.maxpool(x) + # pdb.set_trace() + x1_p = self.layer1_p(x_p) + # print(x1.shape) + x2_p = self.layer2_p(x1_p) + # print(x2.shape) + x3_p = self.layer3_p(x2_p) + # # print(x3.shape) + x4_p = self.layer4_p(x3_p) + # # print(x4.shape) + # # pdb.set_trace() + # # Transposed Convolution Decoder + x_p = F.relu(F.interpolate(self.decoder1_p(x4_p), scale_factor=(2,2), mode ='bilinear')) + x_p = torch.add(x_p, x4_p) + x_p = F.relu(F.interpolate(self.decoder2_p(x_p) , scale_factor=(2,2), mode ='bilinear')) + x_p = torch.add(x_p, x3_p) + x_p = F.relu(F.interpolate(self.decoder3_p(x_p) , scale_factor=(2,2), mode ='bilinear')) + x_p = torch.add(x_p, x2_p) + x_p = F.relu(F.interpolate(self.decoder4_p(x_p) , scale_factor=(2,2), mode ='bilinear')) + x_p = torch.add(x_p, x1_p) + x_p = F.relu(F.interpolate(self.decoder5_p(x_p) , scale_factor=(2,2), mode ='bilinear')) + # x_p = self.soft_p(self.adjust_p(F.relu(x_p))) + # print(x_p.shape) + x_loc[:,:,32*i:32*(i+1),32*j:32*(j+1)] = x_p + + x = torch.add(x,x_loc) + x = F.relu(self.decoderf(x)) + + x = self.adjust(F.relu(x)) + + # pdb.set_trace() + return x + + def forward(self, x): + return self._forward_impl(x) + +class mix_wopos_512(nn.Module): + + def __init__(self, block, block_2, layers, num_classes=2, zero_init_residual=True, + groups=8, width_per_group=64, replace_stride_with_dilation=None, + norm_layer=None, s=0.125, img_size = 128,imgchan = 3): + super(mix_wopos_512, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + self._norm_layer = norm_layer + + self.inplanes = int(64 * s) + self.dilation = 1 + if replace_stride_with_dilation is None: + # each element in the tuple indicates if we should replace + # the 2x2 stride with a dilated convolution instead + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + self.groups = groups + self.base_width = width_per_group + self.conv1 = nn.Conv2d(imgchan, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + # self.conv1 = nn.Conv2d(1, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1 = norm_layer(self.inplanes) + self.conv2 = nn.Conv2d(self.inplanes, 128, kernel_size=3, stride=1, padding=1, bias=False) + self.conv3 = 
nn.Conv2d(128, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1 = norm_layer(self.inplanes) + self.bn2 = norm_layer(128) + self.bn3 = norm_layer(self.inplanes) + self.relu = nn.ReLU(inplace=True) + # self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, int(128 * s), layers[0], kernel_size= (img_size//2)) + self.layer2 = self._make_layer(block, int(256 * s), layers[1], stride=2, kernel_size=(img_size//2), + dilate=replace_stride_with_dilation[0]) + self.layer3 = self._make_layer(block, int(512 * s), layers[2], stride=2, kernel_size=(img_size//4), + dilate=replace_stride_with_dilation[1]) + # self.layer4 = self._make_layer(block, int(1024 * s), layers[3], stride=2, kernel_size=(img_size//8), + # dilate=replace_stride_with_dilation[2]) + + # Decoder + # self.decoder1 = nn.Conv2d(int(1024 *2*s) , int(1024*2*s), kernel_size=3, stride=2, padding=1) + # self.decoder2 = nn.Conv2d(int(1024 *2*s) , int(1024*s), kernel_size=3, stride=1, padding=1) + self.decoder3 = nn.Conv2d(int(1024*s), int(512*s), kernel_size=3, stride=1, padding=1) + self.decoder4 = nn.Conv2d(int(512*s) , int(256*s), kernel_size=3, stride=1, padding=1) + self.decoder5 = nn.Conv2d(int(256*s) , int(128*s) , kernel_size=3, stride=1, padding=1) + self.adjust = nn.Conv2d(int(128*s) , num_classes, kernel_size=1, stride=1, padding=0) + self.soft = nn.Softmax(dim=1) + + self.conv1_p = nn.Conv2d(imgchan, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + self.conv2_p = nn.Conv2d(self.inplanes,128, kernel_size=3, stride=1, padding=1, + bias=False) + self.conv3_p = nn.Conv2d(128, self.inplanes, kernel_size=3, stride=1, padding=1, + bias=False) + # self.conv1 = nn.Conv2d(1, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1_p = norm_layer(self.inplanes) + self.bn2_p = norm_layer(128) + self.bn3_p = norm_layer(self.inplanes) + self.conv1_p = nn.Conv2d(imgchan, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + # self.conv1 = nn.Conv2d(1, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1_p = norm_layer(self.inplanes) + self.relu_p = nn.ReLU(inplace=True) + + img_size_p = img_size // 4 + + self.layer1_p = self._make_layer(block_2, int(128 * s), layers[0], kernel_size= (img_size_p//2)) + self.layer2_p = self._make_layer(block_2, int(256 * s), layers[1], stride=2, kernel_size=(img_size_p//2), + dilate=replace_stride_with_dilation[0]) + self.layer3_p = self._make_layer(block_2, int(512 * s), layers[2], stride=2, kernel_size=(img_size_p//4), + dilate=replace_stride_with_dilation[1]) + self.layer4_p = self._make_layer(block_2, int(1024 * s), layers[3], stride=2, kernel_size=(img_size_p//8), + dilate=replace_stride_with_dilation[2]) + + # Decoder + self.decoder1_p = nn.Conv2d(int(1024 *2*s) , int(1024*2*s), kernel_size=3, stride=2, padding=1) + self.decoder2_p = nn.Conv2d(int(1024 *2*s) , int(1024*s), kernel_size=3, stride=1, padding=1) + self.decoder3_p = nn.Conv2d(int(1024*s), int(512*s), kernel_size=3, stride=1, padding=1) + self.decoder4_p = nn.Conv2d(int(512*s) , int(256*s), kernel_size=3, stride=1, padding=1) + self.decoder5_p = nn.Conv2d(int(256*s) , int(128*s) , kernel_size=3, stride=1, padding=1) + + self.decoderf = nn.Conv2d(int(128*s) , int(128*s) , kernel_size=3, stride=1, padding=1) + self.adjust_p = nn.Conv2d(int(128*s) , num_classes, kernel_size=1, stride=1, padding=0) + self.soft_p = nn.Softmax(dim=1) + + # for m in self.modules(): + # if isinstance(m, (nn.Conv2d, nn.Conv1d)): + # if 
isinstance(m, qkv_transform): + # pass + # else: + # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + # elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d, nn.GroupNorm)): + # nn.init.constant_(m.weight, 1) + # nn.init.constant_(m.bias, 0) + + # Zero-initialize the last BN in each residual branch, + # so that the residual branch starts with zeros, and each residual block behaves like an identity. + # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 + # if zero_init_residual: + # for m in self.modules(): + # if isinstance(m, AxialBlock): + # nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, kernel_size=56, stride=1, dilate=False): + norm_layer = self._norm_layer + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample, groups=self.groups, + base_width=self.base_width, dilation=previous_dilation, + norm_layer=norm_layer, kernel_size=kernel_size)) + self.inplanes = planes * block.expansion + if stride != 1: + kernel_size = kernel_size // 2 + + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, groups=self.groups, + base_width=self.base_width, dilation=self.dilation, + norm_layer=norm_layer, kernel_size=kernel_size)) + + return nn.Sequential(*layers) + + def _forward_impl(self, x): + # See note [TorchScript super()] + # AxialAttention Encoder + # pdb.set_trace() + xin = x.clone() + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.conv2(x) + x = self.bn2(x) + x = self.relu(x) + x = self.conv3(x) + x = self.bn3(x) + # x = F.max_pool2d(x,2,2) + x = self.relu(x) + + # x = self.maxpool(x) + # pdb.set_trace() + x1 = self.layer1(x) + # print(x1.shape) + x2 = self.layer2(x1) + # print(x2.shape) + x3 = self.layer3(x2) + # # print(x3.shape) + # x4 = self.layer4(x3) + # # print(x4.shape) + # # pdb.set_trace() + # # Transposed Convolution Decoder + # x = F.relu(F.interpolate(self.decoder1(x4), scale_factor=(2,2), mode ='bilinear')) + # x = torch.add(x, x4) + # x = F.relu(F.interpolate(self.decoder2(x4) , scale_factor=(2,2), mode ='bilinear')) + # x = torch.add(x, x3) + x = F.relu(F.interpolate(self.decoder3(x3) , scale_factor=(2,2), mode ='bilinear')) + x = torch.add(x, x2) + x = F.relu(F.interpolate(self.decoder4(x2) , scale_factor=(2,2), mode ='bilinear')) + x = torch.add(x, x1) + x = F.relu(F.interpolate(self.decoder5(x) , scale_factor=(2,2), mode ='bilinear')) + # print(x.shape) + + # end of full image training + + # y_out = torch.ones((1,2,128,128)) + x_loc = x.clone() + # x = F.relu(F.interpolate(self.decoder5(x) , scale_factor=(2,2), mode ='bilinear')) + #start + for i in range(0,4): + for j in range(0,4): + + x_p = xin[:,:,128*i:128*(i+1),128*j:128*(j+1)] + # begin patch wise + x_p = self.conv1_p(x_p) + x_p = self.bn1_p(x_p) + # x = F.max_pool2d(x,2,2) + x_p = self.relu(x_p) + + x_p = self.conv2_p(x_p) + x_p = self.bn2_p(x_p) + # x = F.max_pool2d(x,2,2) + x_p = self.relu(x_p) + x_p = self.conv3_p(x_p) + x_p = self.bn3_p(x_p) + # x = F.max_pool2d(x,2,2) + x_p = self.relu(x_p) + + # x = self.maxpool(x) + # pdb.set_trace() + x1_p = self.layer1_p(x_p) + # print(x1.shape) + x2_p = self.layer2_p(x1_p) + # print(x2.shape) + x3_p = self.layer3_p(x2_p) + # # 
# print(x3.shape) + x4_p = self.layer4_p(x3_p) + # # # print(x4.shape) + # # # pdb.set_trace() + # # # Transposed Convolution Decoder + x_p = F.relu(F.interpolate(self.decoder1_p(x4_p), scale_factor=(2,2), mode ='bilinear')) + x_p = torch.add(x_p, x4_p) + x_p = F.relu(F.interpolate(self.decoder2_p(x_p) , scale_factor=(2,2), mode ='bilinear')) + x_p = torch.add(x_p, x3_p) + x_p = F.relu(F.interpolate(self.decoder3_p(x_p) , scale_factor=(2,2), mode ='bilinear')) + x_p = torch.add(x_p, x2_p) + x_p = F.relu(F.interpolate(self.decoder4_p(x_p) , scale_factor=(2,2), mode ='bilinear')) + x_p = torch.add(x_p, x1_p) + x_p = F.relu(F.interpolate(self.decoder5_p(x_p) , scale_factor=(2,2), mode ='bilinear')) + # x_p = self.soft_p(self.adjust_p(F.relu(x_p))) + # print(x_p.shape) + x_loc[:,:,128*i:128*(i+1),128*j:128*(j+1)] = x_p + + x = torch.add(x,x_loc) + x = F.relu(self.decoderf(x)) + + x = self.soft(self.adjust(F.relu(x))) + + # pdb.set_trace() + return x + + def forward(self, x): + return self._forward_impl(x) + +class mix_512(nn.Module): + + def __init__(self, block, layers, num_classes=2, zero_init_residual=True, + groups=8, width_per_group=64, replace_stride_with_dilation=None, + norm_layer=None, s=0.125, img_size = 128,imgchan = 3): + super(mix_512, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + self._norm_layer = norm_layer + + self.inplanes = int(64 * s) + self.dilation = 1 + if replace_stride_with_dilation is None: + # each element in the tuple indicates if we should replace + # the 2x2 stride with a dilated convolution instead + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + self.groups = groups + self.base_width = width_per_group + self.conv1 = nn.Conv2d(imgchan, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + # self.conv1 = nn.Conv2d(1, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1 = norm_layer(self.inplanes) + self.relu = nn.ReLU(inplace=True) + # self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, int(128 * s), layers[0], kernel_size= (img_size//2)) + self.layer2 = self._make_layer(block, int(256 * s), layers[1], stride=2, kernel_size=(img_size//2), + dilate=replace_stride_with_dilation[0]) + self.layer3 = self._make_layer(block, int(512 * s), layers[2], stride=2, kernel_size=(img_size//4), + dilate=replace_stride_with_dilation[1]) + # self.layer4 = self._make_layer(block, int(1024 * s), layers[3], stride=2, kernel_size=(img_size//8), + # dilate=replace_stride_with_dilation[2]) + + # Decoder + # self.decoder1 = nn.Conv2d(int(1024 *2*s) , int(1024*2*s), kernel_size=3, stride=2, padding=1) + # self.decoder2 = nn.Conv2d(int(1024 *2*s) , int(1024*s), kernel_size=3, stride=1, padding=1) + self.decoder3 = nn.Conv2d(int(1024*s), int(512*s), kernel_size=3, stride=1, padding=1) + self.decoder4 = nn.Conv2d(int(512*s) , int(256*s), kernel_size=3, stride=1, padding=1) + self.decoder5 = nn.Conv2d(int(256*s) , int(128*s) , kernel_size=3, stride=1, padding=1) + self.adjust = nn.Conv2d(int(128*s) , num_classes, kernel_size=1, stride=1, padding=0) + self.soft = nn.Softmax(dim=1) + + + self.conv1_p = nn.Conv2d(imgchan, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + # self.conv1 = nn.Conv2d(1, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1_p = 
norm_layer(self.inplanes) + self.relu_p = nn.ReLU(inplace=True) + + img_size_p = img_size // 4 + + self.layer1_p = self._make_layer(block, int(128 * s), layers[0], kernel_size= (img_size_p//2)) + self.layer2_p = self._make_layer(block, int(256 * s), layers[1], stride=2, kernel_size=(img_size_p//2), + dilate=replace_stride_with_dilation[0]) + self.layer3_p = self._make_layer(block, int(512 * s), layers[2], stride=2, kernel_size=(img_size_p//4), + dilate=replace_stride_with_dilation[1]) + self.layer4_p = self._make_layer(block, int(1024 * s), layers[3], stride=2, kernel_size=(img_size_p//8), + dilate=replace_stride_with_dilation[2]) + + # Decoder + self.decoder1_p = nn.Conv2d(int(1024 *2*s) , int(1024*2*s), kernel_size=3, stride=2, padding=1) + self.decoder2_p = nn.Conv2d(int(1024 *2*s) , int(1024*s), kernel_size=3, stride=1, padding=1) + self.decoder3_p = nn.Conv2d(int(1024*s), int(512*s), kernel_size=3, stride=1, padding=1) + self.decoder4_p = nn.Conv2d(int(512*s) , int(256*s), kernel_size=3, stride=1, padding=1) + self.decoder5_p = nn.Conv2d(int(256*s) , int(128*s) , kernel_size=3, stride=1, padding=1) + + self.decoderf = nn.Conv2d(int(128*s) , int(128*s) , kernel_size=3, stride=1, padding=1) + self.adjust_p = nn.Conv2d(int(128*s) , num_classes, kernel_size=1, stride=1, padding=0) + self.soft_p = nn.Softmax(dim=1) + + # for m in self.modules(): + # if isinstance(m, (nn.Conv2d, nn.Conv1d)): + # if isinstance(m, qkv_transform): + # pass + # else: + # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + # elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d, nn.GroupNorm)): + # nn.init.constant_(m.weight, 1) + # nn.init.constant_(m.bias, 0) + + # Zero-initialize the last BN in each residual branch, + # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
+ # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 + # if zero_init_residual: + # for m in self.modules(): + # if isinstance(m, AxialBlock): + # nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, kernel_size=56, stride=1, dilate=False): + norm_layer = self._norm_layer + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample, groups=self.groups, + base_width=self.base_width, dilation=previous_dilation, + norm_layer=norm_layer, kernel_size=kernel_size)) + self.inplanes = planes * block.expansion + if stride != 1: + kernel_size = kernel_size // 2 + + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, groups=self.groups, + base_width=self.base_width, dilation=self.dilation, + norm_layer=norm_layer, kernel_size=kernel_size)) + + return nn.Sequential(*layers) + + def _forward_impl(self, x): + # See note [TorchScript super()] + # AxialAttention Encoder + # pdb.set_trace() + xin = x.clone() + x = self.conv1(x) + x = self.bn1(x) + # x = F.max_pool2d(x,2,2) + x = self.relu(x) + + # x = self.maxpool(x) + # pdb.set_trace() + x1 = self.layer1(x) + # print(x1.shape) + x2 = self.layer2(x1) + # print(x2.shape) + x3 = self.layer3(x2) + # # print(x3.shape) + # x4 = self.layer4(x3) + # # print(x4.shape) + # # pdb.set_trace() + # # Transposed Convolution Decoder + # x = F.relu(F.interpolate(self.decoder1(x4), scale_factor=(2,2), mode ='bilinear')) + # x = torch.add(x, x4) + # x = F.relu(F.interpolate(self.decoder2(x4) , scale_factor=(2,2), mode ='bilinear')) + # x = torch.add(x, x3) + x = F.relu(F.interpolate(self.decoder3(x3) , scale_factor=(2,2), mode ='bilinear')) + x = torch.add(x, x2) + x = F.relu(F.interpolate(self.decoder4(x2) , scale_factor=(2,2), mode ='bilinear')) + x = torch.add(x, x1) + x = F.relu(F.interpolate(self.decoder5(x) , scale_factor=(2,2), mode ='bilinear')) + # print(x.shape) + + # end of full image training + + # y_out = torch.ones((1,2,128,128)) + x_loc = x.clone() + # x = F.relu(F.interpolate(self.decoder5(x) , scale_factor=(2,2), mode ='bilinear')) + #start + for i in range(0,4): + for j in range(0,4): + + x_p = xin[:,:,128*i:128*(i+1),128*j:128*(j+1)] + # begin patch wise + x_p = self.conv1_p(x_p) + x_p = self.bn1_p(x_p) + # x = F.max_pool2d(x,2,2) + x_p = self.relu(x_p) + + # x = self.maxpool(x) + # pdb.set_trace() + x1_p = self.layer1_p(x_p) + # print(x1.shape) + x2_p = self.layer2_p(x1_p) + # print(x2.shape) + x3_p = self.layer3_p(x2_p) + # # print(x3.shape) + x4_p = self.layer4_p(x3_p) + # # print(x4.shape) + # # pdb.set_trace() + # # Transposed Convolution Decoder + x_p = F.relu(F.interpolate(self.decoder1_p(x4_p), scale_factor=(2,2), mode ='bilinear')) + x_p = torch.add(x_p, x4_p) + x_p = F.relu(F.interpolate(self.decoder2_p(x_p) , scale_factor=(2,2), mode ='bilinear')) + x_p = torch.add(x_p, x3_p) + x_p = F.relu(F.interpolate(self.decoder3_p(x_p) , scale_factor=(2,2), mode ='bilinear')) + x_p = torch.add(x_p, x2_p) + x_p = F.relu(F.interpolate(self.decoder4_p(x_p) , scale_factor=(2,2), mode ='bilinear')) + x_p = torch.add(x_p, x1_p) + x_p = F.relu(F.interpolate(self.decoder5_p(x_p) , scale_factor=(2,2), mode ='bilinear')) + # x_p = 
self.soft_p(self.adjust_p(F.relu(x_p))) + # print(x_p.shape) + x_loc[:,:,128*i:128*(i+1),128*j:128*(j+1)] = x_p + + x = torch.add(x,x_loc) + x = F.relu(self.decoderf(x)) + + x = self.soft(self.adjust(F.relu(x))) + + # pdb.set_trace() + return x + + def forward(self, x): + return self._forward_impl(x) + +class ResAxialAttentionUNetshallow(nn.Module): + + def __init__(self, block, layers, num_classes=2, zero_init_residual=True, + groups=8, width_per_group=64, replace_stride_with_dilation=None, + norm_layer=None, s=0.125, img_size = 128, imgchan = 3): + super(ResAxialAttentionUNetshallow, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + self._norm_layer = norm_layer + + self.inplanes = int(64 * s) + self.dilation = 1 + if replace_stride_with_dilation is None: + # each element in the tuple indicates if we should replace + # the 2x2 stride with a dilated convolution instead + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + self.groups = groups + self.base_width = width_per_group + self.conv1 = nn.Conv2d(imgchan, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + # self.conv1 = nn.Conv2d(1, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1 = norm_layer(self.inplanes) + self.relu = nn.ReLU(inplace=True) + # self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, int(128 * s), layers[0], kernel_size= (img_size//2)) + self.layer2 = self._make_layer(block, int(256 * s), layers[1], stride=2, kernel_size=(img_size//2), + dilate=replace_stride_with_dilation[0]) + # self.layer3 = self._make_layer(block, int(512 * s), layers[2], stride=2, kernel_size=(img_size//4), + # dilate=replace_stride_with_dilation[1]) + # self.layer4 = self._make_layer(block, int(1024 * s), layers[3], stride=2, kernel_size=(img_size//8), + # dilate=replace_stride_with_dilation[2]) + + # Decoder + # self.decoder1 = nn.Conv2d(int(1024 *2*s) , int(1024*2*s), kernel_size=3, stride=2, padding=1) + # self.decoder2 = nn.Conv2d(int(1024 *2*s) , int(1024*s), kernel_size=3, stride=1, padding=1) + # self.decoder3 = nn.Conv2d(int(1024*s), int(512*s), kernel_size=3, stride=1, padding=1) + self.decoder4 = nn.Conv2d(int(512*s) , int(256*s), kernel_size=3, stride=1, padding=1) + self.decoder5 = nn.Conv2d(int(256*s) , int(128*s) , kernel_size=3, stride=1, padding=1) + self.adjust = nn.Conv2d(int(128*s) , num_classes, kernel_size=1, stride=1, padding=0) + self.soft = nn.Softmax(dim=1) + + # for m in self.modules(): + # if isinstance(m, (nn.Conv2d, nn.Conv1d)): + # if isinstance(m, qkv_transform): + # pass + # else: + # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + # elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d, nn.GroupNorm)): + # nn.init.constant_(m.weight, 1) + # nn.init.constant_(m.bias, 0) + + # Zero-initialize the last BN in each residual branch, + # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
+ # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 + # if zero_init_residual: + # for m in self.modules(): + # if isinstance(m, AxialBlock): + # nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, kernel_size=56, stride=1, dilate=False): + norm_layer = self._norm_layer + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample, groups=self.groups, + base_width=self.base_width, dilation=previous_dilation, + norm_layer=norm_layer, kernel_size=kernel_size)) + self.inplanes = planes * block.expansion + if stride != 1: + kernel_size = kernel_size // 2 + + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, groups=self.groups, + base_width=self.base_width, dilation=self.dilation, + norm_layer=norm_layer, kernel_size=kernel_size)) + + return nn.Sequential(*layers) + + def _forward_impl(self, x): + # See note [TorchScript super()] + # AxialAttention Encoder + # pdb.set_trace() + x = self.conv1(x) + x = self.bn1(x) + # x = F.max_pool2d(x,2,2) + x = self.relu(x) + + # x = self.maxpool(x) + # pdb.set_trace() + x1 = self.layer1(x) + # print(x1.shape) + x2 = self.layer2(x1) + # print(x2.shape) + # x3 = self.layer3(x2) + # print(x3.shape) + # x4 = self.layer4(x3) + # print(x4.shape) + # pdb.set_trace() + # Transposed Convolution Decoder + # x = F.relu(F.interpolate(self.decoder1(x4), scale_factor=(2,2), mode ='bilinear')) + # x = torch.add(x, x4) + # x = F.relu(F.interpolate(self.decoder2(x) , scale_factor=(2,2), mode ='bilinear')) + # x = torch.add(x, x3) + # x = F.relu(F.interpolate(self.decoder3(x2) , scale_factor=(2,2), mode ='bilinear')) + # x = torch.add(x, x2) + x = F.relu(F.interpolate(self.decoder4(x2) , scale_factor=(2,2), mode ='bilinear')) + x = torch.add(x, x1) + x = F.relu(F.interpolate(self.decoder5(x1) , scale_factor=(2,2), mode ='bilinear')) + x = self.soft(self.adjust(F.relu(x))) + # pdb.set_trace() + return x + + def forward(self, x): + return self._forward_impl(x) + +class autoencoder(nn.Module): + def __init__(self): + super(autoencoder, self).__init__() + + + self.encoder1 = nn.Conv2d(3, 64, 3, stride=1, padding=1) # b, 16, 10, 10 + self.encoder2= nn.Conv2d(64, 128, 3, stride=1, padding=1) # b, 8, 3, 3 + self.encoder3= nn.Conv2d(128, 256, 3, stride=1, padding=1) + self.encoder4= nn.Conv2d(256, 512, 3, stride=1, padding=1) + self.encoder5= nn.Conv2d(512, 1024, 3, stride=1, padding=1) + + self.decoder1 = nn.Conv2d(1024, 512, 3, stride=1,padding=2) # b, 16, 5, 5 + self.decoder2 = nn.Conv2d(512, 256, 3, stride=1, padding=2) # b, 8, 15, 1 + self.decoder3 = nn.Conv2d(256, 128, 3, stride=1, padding=1) # b, 1, 28, 28 + self.decoder4 = nn.Conv2d(128, 64, 3, stride=1, padding=1) + self.decoder5 = nn.Conv2d(64, 2, 3, stride=1, padding=1) + + self.soft = nn.Softmax(dim =1) + + def forward(self, x): + + out = F.relu(F.max_pool2d(self.encoder1(x),2,2)) + out = F.relu(F.max_pool2d(self.encoder2(out),2,2)) + out = F.relu(F.max_pool2d(self.encoder3(out),2,2)) + + out = F.relu(F.interpolate(self.decoder3(out),scale_factor=(2,2),mode ='bilinear')) + + out = F.relu(F.interpolate(self.decoder4(out),scale_factor=(2,2),mode ='bilinear')) + + out = 
F.relu(F.interpolate(self.decoder5(out), scale_factor=(2,2), mode='bilinear'))
+        # print(out.shape)
+        out = self.soft(out)
+        return out
+
+
+def axial26s(pretrained=False, **kwargs):
+    model = AxialAttentionNet(AxialBlock, [1, 2, 4, 1], s=0.5, **kwargs)
+    return model
+
+
+def axial50s(pretrained=False, **kwargs):
+    model = AxialAttentionNet(AxialBlock, [3, 4, 6, 3], s=0.5, **kwargs)
+    return model
+
+
+def axial50m(pretrained=False, **kwargs):
+    model = AxialAttentionNet(AxialBlock, [3, 4, 6, 3], s=0.75, **kwargs)
+    return model
+
+
+def axial50l(pretrained=False, **kwargs):
+    model = AxialAttentionNet(AxialBlock, [3, 4, 6, 3], s=1, **kwargs)
+    return model
+
+
+def resxialunet128s(pretrained=False, **kwargs):
+    model = ResAxialAttentionUNet(AxialBlock_dynamic, [1, 2, 4, 1], s=0.125, img_size=128, imgchan=1, **kwargs)
+    return model
+
+
+def resaxialunet_dyn(pretrained=False, **kwargs):
+    model = ResAxialAttentionUNet(AxialBlock_dynamic, [1, 2, 4, 1], s=0.125, img_size=512, imgchan=3, **kwargs)
+    return model
+
+
+def resxialunet_wopos(pretrained=False, **kwargs):
+    model = ResAxialAttentionUNet(AxialBlock_wopos, [1, 2, 4, 1], s=0.125, img_size=128, imgchan=3, **kwargs)
+    return model
+
+
+def resunet(pretrained=False, **kwargs):
+    model = ResAxialAttentionUNet(AxialBlockmod, [1, 2, 4, 1], s=0.125, img_size=128, imgchan=1, **kwargs)
+    return model
+
+
+def unetplusplus(pretrained=False, **kwargs):
+    model = unetplus(AxialBlockmod, [1, 2, 4, 1], s=0.125, img_size=128, imgchan=3, **kwargs)
+    return model
+
+
+def mix_net(pretrained=False, **kwargs):
+    model = mix(AxialBlock_dynamic, [1, 2, 4, 1], s=0.125, img_size=128, imgchan=3, **kwargs)
+    return model
+
+
+def mix_net_512(pretrained=False, **kwargs):
+    model = mix_512(AxialBlock, [1, 2, 4, 1], s=0.125, img_size=512, imgchan=3, **kwargs)
+    return model
+
+
+def mix_net_gated_d(pretrained=False, **kwargs):
+    model = mix(AxialBlock_gated_data, [1, 2, 4, 1], s=0.125, img_size=128, imgchan=1, **kwargs)
+    return model
+
+
+def mix_net_wopos(pretrained=False, **kwargs):
+    model = mix_wopos(AxialBlock_dynamic, AxialBlock_wopos, [1, 2, 4, 1], s=0.125, img_size=128, imgchan=1, **kwargs)
+    return model
+
+
+def mix_net_wopos_512(pretrained=False, **kwargs):
+    model = mix_wopos_512(AxialBlock, AxialBlock_wopos, [1, 2, 4, 1], s=0.125, img_size=512, imgchan=3, **kwargs)
+    return model
+
+
+# def resunet_wopos(pretrained=False, **kwargs):
+#     model = ResAxialAttentionUNet(AxialBlockmod_wopos, [1, 2, 4, 1], s=0.125, img_size=32, imgchan=3, **kwargs)
+#     return model
+
+
+def resxialunet128s_shallow(pretrained=False, **kwargs):
+    model = ResAxialAttentionUNetshallow(AxialBlockmod, [1, 2, 4, 1], s=0.125, img_size=128, imgchan=3, **kwargs)
+    return model
+# EOF
\ No newline at end of file
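(Editor's note.) The constructors above differ only in block type, stage depths ([1, 2, 4, 1] or [3, 4, 6, 3]), width multiplier s, and the input geometry the axial kernel sizes were computed for; the `pretrained` flag is accepted but unused. A minimal smoke test, assuming this file is importable as lib.models.axialnet (import path hypothetical) and that inputs match the wired-in resolutions (224 for the classifiers, 128 single-channel for mix_net_wopos):

    import torch
    from lib.models.axialnet import axial26s, mix_net_wopos  # hypothetical import path

    clf = axial26s(num_classes=1000).eval()
    seg = mix_net_wopos(num_classes=2).eval()       # MedT-style global + local (patch) branch
    with torch.no_grad():
        logits = clf(torch.randn(1, 3, 224, 224))   # -> torch.Size([1, 1000])
        masks = seg(torch.randn(1, 1, 128, 128))    # -> torch.Size([1, 2, 128, 128])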
+
+
+class AxialBlock(nn.Module):
+    expansion = 2
+
+    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
+                 base_width=64, dilation=1, norm_layer=None, kernel_size=56):
+        super(AxialBlock, self).__init__()
+        if norm_layer is None:
+            norm_layer = nn.BatchNorm2d
+        width = int(planes * (base_width / 64.))
+        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
+        self.conv_down = conv1x1(inplanes, width)
+        self.bn1 = norm_layer(width)
+        # 'hight_block' is kept as spelled so existing checkpoints still load
+        self.hight_block = AxialAttention(width, width, groups=groups, kernel_size=kernel_size)
+        self.width_block = AxialAttention(width, width, groups=groups, kernel_size=kernel_size, stride=stride, width=True)
+        self.conv_up = conv1x1(width, planes * self.expansion)
+        self.bn2 = norm_layer(planes * self.expansion)
+        self.relu = nn.ReLU(inplace=True)
+        self.downsample = downsample
+        self.stride = stride
+
+    def forward(self, x):
+        identity = x
+
+        out = self.conv_down(x)  # down-projection: inplanes -> width
+        out = self.bn1(out)
+        out = self.relu(out)
+        out = self.hight_block(out)  # axial attention along the height axis
+        out = self.width_block(out)  # axial attention along the width axis
+        out = self.relu(out)
+
+        out = self.conv_up(out)  # up-projection: width -> planes * expansion
+        out = self.bn2(out)
+
+        if self.downsample is not None:
+            identity = self.downsample(x)
+
+        out += identity
+        out = self.relu(out)
+
+        return out
+
+
+class AxialAttention(nn.Module):
+    def __init__(self, in_planes, out_planes, groups=8, kernel_size=56,
+                 stride=1, bias=False, width=False):
+        assert (in_planes % groups == 0) and (out_planes % groups == 0)
+        super(AxialAttention, self).__init__()
+        self.in_planes = in_planes
+        self.out_planes = out_planes
+        self.groups = groups
+        self.group_planes = out_planes // groups
+        self.kernel_size = kernel_size
+        self.stride = stride
+        self.bias = bias
+        self.width = width
+
+        # Multi-head self attention
+        self.qkv_transform = qkv_transform(in_planes, out_planes * 2, kernel_size=1, stride=1,
+                                           padding=0, bias=False)
+        self.bn_qkv = nn.BatchNorm1d(out_planes * 2)
+        self.bn_similarity = nn.BatchNorm2d(groups * 3)
+
+        self.bn_output = nn.BatchNorm1d(out_planes * 2)
+
+        # Relative position embedding
+        self.relative = nn.Parameter(torch.randn(self.group_planes * 2, kernel_size * 2 - 1), requires_grad=True)
+        query_index = torch.arange(kernel_size).unsqueeze(0)
+        key_index = torch.arange(kernel_size).unsqueeze(1)
+        relative_index = key_index - query_index + kernel_size - 1
+        self.register_buffer('flatten_index', relative_index.view(-1))
+        if stride > 1:
+            self.pooling = nn.AvgPool2d(stride, stride=stride)
+
+        self.reset_parameters()
+
+    def forward(self, x):
+        if self.width:
+            x = x.permute(0, 2, 1, 3)  # N, H, C, W
+        else:
+            x = x.permute(0, 3, 1, 2)  # N, W, C, H
+        N, W, C, H = x.shape
+        # every row (or column) becomes an independent 1-D attention problem
+        x = x.contiguous().view(N * W, C, H)
+
+        # Transformations
+        qkv = self.bn_qkv(self.qkv_transform(x))
+        q, k, v = torch.split(qkv.reshape(N * W, self.groups, self.group_planes * 2, H),
+                              [self.group_planes // 2, self.group_planes // 2, self.group_planes], dim=2)
+
+        # Calculate position embedding.
+        # torch.index_select(input, dim, index) gathers, along the given dimension, the entries
+        # of the input tensor at the specified indices; here it expands the (2K - 1) learned
+        # relative offsets into a dense (K, K) table per embedding channel.
+        all_embeddings = torch.index_select(self.relative, 1, self.flatten_index).view(self.group_planes * 2,
+                                                                                       self.kernel_size,
+                                                                                       self.kernel_size)
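+        # Worked example of the gather above (illustrative comment, K = kernel_size = 3):
+        #   query_index = [[0, 1, 2]], key_index = [[0], [1], [2]]
+        #   relative_index = key - query + K - 1 = [[2, 1, 0], [3, 2, 1], [4, 3, 2]]
+        # so the 2K - 1 = 5 learned offset columns of self.relative are expanded into a
+        # dense (K, K) lookup table, one row per key position, shared by q, k and v.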
+        q_embedding, k_embedding, v_embedding = torch.split(all_embeddings,
+                                                            [self.group_planes // 2, self.group_planes // 2,
+                                                             self.group_planes], dim=0)
+
+        # content-content (qk) and content-position (qr, kr) similarity logits
+        qr = torch.einsum('bgci,cij->bgij', q, q_embedding)
+        kr = torch.einsum('bgci,cij->bgij', k, k_embedding).transpose(2, 3)
+        qk = torch.einsum('bgci,bgcj->bgij', q, k)
+
+        stacked_similarity = torch.cat([qk, qr, kr], dim=1)
+        stacked_similarity = self.bn_similarity(stacked_similarity).view(N * W, 3, self.groups, H, H).sum(dim=1)
+        # (N * W, groups, H, H)
+        similarity = F.softmax(stacked_similarity, dim=3)
+        sv = torch.einsum('bgij,bgcj->bgci', similarity, v)
+        sve = torch.einsum('bgij,cij->bgci', similarity, v_embedding)
+        stacked_output = torch.cat([sv, sve], dim=-1).view(N * W, self.out_planes * 2, H)
+        output = self.bn_output(stacked_output).view(N, W, self.out_planes, 2, H).sum(dim=-2)
+
+        if self.width:
+            output = output.permute(0, 2, 1, 3)
+        else:
+            output = output.permute(0, 2, 3, 1)
+
+        if self.stride > 1:
+            output = self.pooling(output)
+
+        return output
+
+    def reset_parameters(self):
+        self.qkv_transform.weight.data.normal_(0, math.sqrt(1. / self.in_planes))
+        nn.init.normal_(self.relative, 0., math.sqrt(1. / self.group_planes))
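To make the factorization concrete, here is a minimal, dependency-free sketch of single-axis attention (no projections, position terms, or BN; toy sizes), mirroring the reshape-to-(N*W, C, H) trick used in AxialAttention.forward:

import torch
import torch.nn.functional as F

def toy_axial_attention(x):  # x: (N, C, H, W), attends along H only
    N, C, H, W = x.shape
    cols = x.permute(0, 3, 1, 2).reshape(N * W, C, H)  # each column is its own sequence
    q = k = v = cols                                   # toy case: no learned projections
    logits = torch.einsum('bci,bcj->bij', q, k) / C ** 0.5
    attn = F.softmax(logits, dim=-1)
    out = torch.einsum('bij,bcj->bci', attn, v)
    return out.reshape(N, W, C, H).permute(0, 2, 3, 1)  # back to (N, C, H, W)

x = torch.randn(2, 16, 8, 8)
print(toy_axial_attention(x).shape)  # torch.Size([2, 16, 8, 8])

Running height- then width-attention costs O(H*W*(H + W)) instead of O((H*W)^2) for full 2-D self-attention, which is what makes the block affordable at segmentation resolutions.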
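The class below runs a global axial-attention branch plus three local branches that tile the input into patches, push every patch through one shared encoder-decoder, and stitch the results back. A compact sketch of that patch-grid pattern (illustrative; nn.Identity stands in for the shared patch network):

import torch
import torch.nn as nn

def grid_apply(x, submodule, grid=4):
    """Run a shared submodule over a grid x grid tiling of x and stitch the results back."""
    out = x.clone()
    N, C, H, W = x.shape
    h, w = H // grid, W // grid
    for i in range(grid):
        for j in range(grid):
            patch = x[:, :, h * i:h * (i + 1), w * j:w * (j + 1)]
            out[:, :, h * i:h * (i + 1), w * j:w * (j + 1)] = submodule(patch)
    return out

x = torch.randn(1, 3, 128, 128)
print(grid_apply(x, nn.Identity()).shape)  # torch.Size([1, 3, 128, 128])

In medt_net itself the patch pathway changes the channel count, so the write-back buffer is cloned from a feature map rather than from the raw input, and the medium/large branches max-pool first so every branch feeds the shared network tiles of the same size.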


class medt_net(nn.Module):

+    def __init__(self, block, block_2, layers, num_classes=2, zero_init_residual=True,
+                 groups=8, width_per_group=64, replace_stride_with_dilation=None,
+                 norm_layer=None, s=0.125, img_size=128, imgchan=3):
+        super(medt_net, self).__init__()
+        if norm_layer is None:
+            norm_layer = nn.BatchNorm2d
+        self._norm_layer = norm_layer
+
+        self.inplanes = int(64 * s)  # 64 * 0.125 = 8
+        self.dilation = 1
+        if replace_stride_with_dilation is None:
+            replace_stride_with_dilation = [False, False, False]
+        if len(replace_stride_with_dilation) != 3:
+            raise ValueError("replace_stride_with_dilation should be None "
+                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
+        self.groups = groups  # 8
+        self.base_width = width_per_group  # 64
+        # Global-branch stem
+        self.conv1 = nn.Conv2d(imgchan, self.inplanes, kernel_size=7, stride=2, padding=3,
+                               bias=False)  # (h - 7 + 2*3) // 2 + 1, i.e. roughly h / 2
+        self.conv2 = nn.Conv2d(self.inplanes, 128, kernel_size=3, stride=1, padding=1, bias=False)  # spatial size unchanged
+        self.conv3 = nn.Conv2d(128, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)  # spatial size unchanged
+        self.conv4 = nn.Conv2d(self.inplanes, self.inplanes * 2, kernel_size=3, stride=1, padding=1, bias=False)
+
+        self.bn1 = norm_layer(self.inplanes)
+        self.bn2 = norm_layer(128)
+        self.bn3 = norm_layer(self.inplanes)
+        self.bn4 = norm_layer(self.inplanes * 2)
+
+        self.relu = nn.ReLU(inplace=True)
+        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+        self.layer1 = self._make_layer(block, int(128 * s), layers[0], kernel_size=(img_size // 2))
+        self.layer2 = self._make_layer(block, int(256 * s), layers[1], stride=2, kernel_size=(img_size // 2),
+                                       dilate=replace_stride_with_dilation[0])
+        # layer3/layer4 of the global branch are disabled in this configuration:
+        # self.layer3 = self._make_layer(block, int(512 * s), layers[2], stride=2, kernel_size=(img_size // 4),
+        #                                dilate=replace_stride_with_dilation[1])
+        # self.layer4 = self._make_layer(block, int(1024 * s), layers[3], stride=2, kernel_size=(img_size // 8),
+        #                                dilate=replace_stride_with_dilation[2])
+
+        # Global-branch decoder (decoder1-3 match the disabled layer3/layer4 and stay commented out)
+        # self.decoder1 = nn.Conv2d(int(1024 * 2 * s), int(1024 * 2 * s), kernel_size=3, stride=2, padding=1)
+        # self.decoder2 = nn.Conv2d(int(1024 * 2 * s), int(1024 * s), kernel_size=3, stride=1, padding=1)
+        # self.decoder3 = nn.Conv2d(int(1024 * s), int(512 * s), kernel_size=3, stride=1, padding=1)
+        self.decoder4 = nn.Conv2d(int(512 * s), int(256 * s), kernel_size=3, stride=1, padding=1)
+        self.decoder5 = nn.Conv2d(int(256 * s), int(128 * s), kernel_size=3, stride=1, padding=1)
+        self.adjust = nn.Conv2d(int(128 * s), num_classes, kernel_size=1, stride=1, padding=0)
+        self.soft = nn.Softmax(dim=1)
+
+        # Local (patch) branch stem, shared by all patch scales
+        self.conv1_p = nn.Conv2d(imgchan, self.inplanes, kernel_size=7, stride=2, padding=3,
+                                 bias=False)
+        self.conv2_p = nn.Conv2d(self.inplanes, 128, kernel_size=3, stride=1, padding=1,
+                                 bias=False)
+        self.conv3_p = nn.Conv2d(128, self.inplanes, kernel_size=3, stride=1, padding=1,
+                                 bias=False)
+        self.bn1_p = norm_layer(self.inplanes)
+        self.bn2_p = 
norm_layer(128) + self.bn3_p = norm_layer(self.inplanes) + + self.relu_p = nn.ReLU(inplace=True) + + img_size_p = img_size // 4 + + self.layer1_p = self._make_layer(block_2, int(128 * s), layers[0], kernel_size=(img_size_p // 2)) + self.layer2_p = self._make_layer(block_2, int(256 * s), layers[1], stride=2, kernel_size=(img_size_p // 2), + dilate=replace_stride_with_dilation[0]) + self.layer3_p = self._make_layer(block_2, int(512 * s), layers[2], stride=2, kernel_size=(img_size_p // 4), + dilate=replace_stride_with_dilation[1]) + self.layer4_p = self._make_layer(block_2, int(1024 * s), layers[3], stride=2, kernel_size=(img_size_p // 8), + dilate=replace_stride_with_dilation[2]) + + # Decoder + self.decoder1_p = nn.Conv2d(int(1024 * 2 * s), int(1024 * 2 * s), kernel_size=3, stride=2, padding=1) + self.decoder2_p = nn.Conv2d(int(1024 * 2 * s), int(1024 * s), kernel_size=3, stride=1, padding=1) + self.decoder3_p = nn.Conv2d(int(1024 * s), int(512 * s), kernel_size=3, stride=1, padding=1) + self.decoder4_p = nn.Conv2d(int(512 * s), int(256 * s), kernel_size=3, stride=1, padding=1) + self.decoder5_p = nn.Conv2d(int(256 * s), int(128 * s), kernel_size=3, stride=1, padding=1) + + self.decoderf = nn.Conv2d(int(128 * s), int(128 * s), kernel_size=3, stride=1, padding=1) + self.adjust_p = nn.Conv2d(int(128 * s), num_classes, kernel_size=1, stride=1, padding=0) + self.soft_p = nn.Softmax(dim=1) + + def _make_layer(self, block, planes, blocks, kernel_size=56, stride=1, dilate=False): + norm_layer = self._norm_layer + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample, groups=self.groups, + base_width=self.base_width, dilation=previous_dilation, + norm_layer=norm_layer, kernel_size=kernel_size)) + self.inplanes = planes * block.expansion + if stride != 1: + kernel_size = kernel_size // 2 + + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, groups=self.groups, + base_width=self.base_width, dilation=self.dilation, + norm_layer=norm_layer, kernel_size=kernel_size)) + + return nn.Sequential(*layers) + + def _forward_impl(self, x): + + xin_s = x.clone() + xin_m = x.clone() + xin_l = x.clone() + + xin = x.clone() + + + x = self.conv1(x) # 3-> inplanes + x = self.bn1(x) + x = self.relu(x) + x = self.conv2(x) # inplanes -> 128 + x = self.bn2(x) + x = self.relu(x) + x = self.conv3(x) # 128 -> inplanes + x = self.bn3(x) + # x = F.max_pool2d(x,2,2) + x = self.relu(x) + + x = self.conv4(x) + x = self.bn4(x) + x = self.relu(x) + x = F.interpolate(x, scale_factor=(2, 2), mode='bilinear') + # print('x: ', x.shape) # [1, 8, 128, 128] + + '''# x = self.maxpool(x) + # pdb.set_trace() + x1 = self.layer1(x) # inplanes -> 128*s*2 inplanes在layers里面会乘以2 inplanes变为 planes*2(expansion) + print('layer1: ', x1.shape) # [1, 32, 128, 128] + x2 = self.layer2(x1) # 128*s*2 -> 256*s*2 inplances:256*s->256*s*2 + print('layer2: ', x2.shape) # [1, 64, 64, 64] + # x3 = self.layer3(x2) + # # print(x3.shape) + # x4 = self.layer4(x3) + # # print(x4.shape) + # x = F.relu(F.interpolate(self.decoder1(x4), scale_factor=(2,2), mode ='bilinear')) + # x = torch.add(x, x4) + # x = F.relu(F.interpolate(self.decoder2(x4) , scale_factor=(2,2), mode ='bilinear')) + # x = torch.add(x, x3) + # x = 
F.relu(F.interpolate(self.decoder3(x3) , scale_factor=(2,2), mode ='bilinear')) + # x = torch.add(x, x2) + x = F.relu(F.interpolate(self.decoder4(x2), scale_factor=(2, 2), mode='bilinear')) + x = torch.add(x, x1) + x = F.relu(F.interpolate(self.decoder5(x), scale_factor=(2, 2), mode='bilinear')) + print(x.shape) # [1, 16, 256, 256]''' + + + + # # 到这将全图片输入进行了两层transformer,每层都有残差连接。 + # # + # # end of full image training + # + # # y_out = torch.ones((1,2,128,128)) + # x_loc_s = x.clone() + # x_loc_m = x.clone() + # x_loc_l = x.clone() + # + # # x = F.relu(F.interpolate(self.decoder5(x) , scale_factor=(2,2), mode ='bilinear')) + # # start + # h_s = xin_s.shape[2] + # w_s = xin_s.shape[3] + # print('w_s: ', w_s) # 256 + # print('h_s: ', h_s) # 256 + # i_start = 0 + # i_end = 0 + # j_start = 0 + # j_end = 0 + # for i in range(0, h_s): + # for j in range(0, w_s): + # if i < h_s//8: + # if j < w_s//8: + # i_start = 0 + # i_end = h_s//4 + # j_start = 0 + # j_end = w_s//4 + # x_p_s = xin_s[:, :, i_start:i_end, j_start:j_end] + # elif j >= w_s*7//8-1: + # i_start = 0 + # i_end = h_s // 4 + # j_start = w_s*3//4 + # j_end = w_s + # x_p_s = xin_s[:, :, i_start:i_end, j_start:j_end] + # else: + # i_start = 0 + # i_end = h_s // 4 + # j_start = j-w_s//8 + # j_end = j+w_s//8 + # x_p_s = xin_s[:, :, i_start:i_end, j_start:j_end] + # + # elif i >= h_s*7//8-1: + # if j < w_s//8: + # i_start = h_s*3//4 + # i_end = h_s + # j_start = 0 + # j_end = w_s//4 + # x_p_s = xin_s[:, :, i_start:i_end, j_start:j_end] + # + # elif j >= w_s*7//8-1: + # i_start = h_s * 3 // 4 + # i_end = h_s + # j_start = w_s*3//4 + # j_end = w_s + # x_p_s = xin_s[:, :, i_start:i_end, j_start:j_end] + # + # else: + # i_start = h_s * 3 // 4 + # i_end = h_s + # j_start = j-w_s//8 + # j_end = j+w_s//8 + # x_p_s = xin_s[:, :, i_start:i_end, j_start:j_end] + # else: + # if j < w_s//8: + # i_start = i-h_s//8 + # i_end = i+h_s//8 + # j_start = 0 + # j_end = w_s//4 + # x_p_s = xin_s[:, :, i_start:i_end, j_start:j_end] + # + # elif j >= w_s*7//8-1: + # i_start = i-h_s//8 + # i_end = i+h_s//8 + # j_start = w_s*3//4 + # j_end = w_s + # x_p_s = xin_s[:, :, i_start:i_end, j_start:j_end] + # else: + # i_start = i-h_s//8 + # i_end = i+h_s//8 + # j_start = j - w_s//8 + # j_end = j + w_s//8 + # x_p_s = xin_s[:, :, i_start:i_end, j_start:j_end] + # print('x_p_s patch: ', x_p_s.shape) # [1, 3, 64, 64] 256/4跨度为H/4 + # print('inplans:', self.inplanes) # 256 + # + # x_p_s = self.conv1_p(x_p_s) + # print('conv1_p shape: ', x_p_s.shape) # [1, 64, 32, 32] stride=2 + # x_p_s = self.bn1_p(x_p_s) + # x_p_s = self.relu(x_p_s) + # + # x_p_s = self.conv2_p(x_p_s) + # print('conv2_p shape: ', x_p_s.shape) # [1, 128, 32, 32] + # x_p_s = self.bn2_p(x_p_s) + # x_p_s = self.relu(x_p_s) + # + # x_p_s = self.conv3_p(x_p_s) + # print('conv3_p shape: ', x_p_s.shape) # [1, 64, 32, 32] + # x_p_s = self.bn3_p(x_p_s) + # x_p_s = self.relu(x_p_s) + # + # x1_p_s = self.layer1_p(x_p_s) + # print('layer1_p shape: ', x1_p_s.shape) # [1, 32, 32, 32] + # x2_p_s = self.layer2_p(x1_p_s) + # print('layer2_p shape: ', x2_p_s.shape) # [1, 64, 16, 16] + # x3_p_s = self.layer3_p(x2_p_s) + # print('layer3_p shape: ', x3_p_s.shape) # [1, 128, 8, 8] + # x4_p_s = self.layer4_p(x3_p_s) + # print('x4_p_s shape: ', x4_p_s.shape) # [1, 256, 4, 4] + # + # x_p_s = F.relu(F.interpolate(self.decoder1_p(x4_p_s), scale_factor=(2, 2), mode='bilinear')) + # print('x_p_s shape: ', x_p_s.shape) # [1, 256, 4, 4] + # x_p_s = torch.add(x_p_s, x4_p_s) + # x_p_s = F.relu(F.interpolate(self.decoder2_p(x_p_s), 
scale_factor=(2, 2), mode='bilinear')) + # print('x_p_s shape: ', x_p_s.shape) # [1, 128, 8, 8] + # x_p_s = torch.add(x_p_s, x3_p_s) + # x_p_s = F.relu(F.interpolate(self.decoder3_p(x_p_s), scale_factor=(2, 2), mode='bilinear')) + # print('x_p_s shape: ', x_p_s.shape) # [1, 64, 16, 16] + # x_p_s = torch.add(x_p_s, x2_p_s) + # x_p_s = F.relu(F.interpolate(self.decoder4_p(x_p_s), scale_factor=(2, 2), mode='bilinear')) + # print('x_p_s shape: ', x_p_s.shape) # [1, 32, 32, 32] + # x_p_s = torch.add(x_p_s, x1_p_s) + # x_p_s = F.relu(F.interpolate(self.decoder5_p(x_p_s), scale_factor=(2, 2), mode='bilinear')) + # print('x_p_s shape: ', x_p_s.shape) # [1, 16, 64, 64] + # x_loc_s[:, :, i_start:i_end, j_start:j_end] = x_p_s + # print('i,j: ', i, j) + # + # + # + # # x = F.relu(F.interpolate(self.decoder5(x) , scale_factor=(2,2), mode ='bilinear')) + # # start + # + # xin_m = self.maxpool(xin_m) + # h_m = xin_m.shape[2] + # w_m = xin_m.shape[3] + # print('h_m: ', h_m) + # print('h_m: ', w_m) + # i_m_start = 0 + # i_m_end = 0 + # j_m_start = 0 + # j_m_end = 0 + # for i in range(0, h_m): + # for j in range(0, w_m): + # if i < h_m // 4: + # if j < w_m // 4: + # i_m_start = 0 + # i_m_end = h_m // 2 + # j_m_start = 0 + # j_m_end = w_m // 2 + # x_p_m = xin_m[:, :, i_m_start:i_m_end, j_m_start:j_m_end] + # elif j >= w_m * 3 // 4 - 1: + # i_m_start = 0 + # i_m_end = h_m // 2 + # j_m_start = w_m * 1 // 2 + # j_m_end = w_m + # x_p_m = xin_m[:, :, i_m_start:i_m_end, j_m_start:j_m_end] + # else: + # i_m_start = 0 + # i_m_end = h_m // 2 + # j_m_start = j - w_m//4 + # j_m_end = j + w_m//4 + # x_p_m = xin_m[:, :, i_m_start:i_m_end, j_m_start:j_m_end] + # + # elif i >= h_m * 3 // 4 - 1: + # if j < w_m // 4: + # i_m_start = h_m * 1 // 2 + # i_m_end = h_m + # j_m_start = 0 + # j_m_end = w_m // 2 + # x_p_m = xin_m[:, :, i_m_start:i_m_end, j_m_start:j_m_end] + # + # elif j >= w_m * 3 // 4 - 1: + # i_m_start = h_m * 1 // 2 + # i_m_end = h_m + # j_m_start = w_m * 1 // 2 + # j_m_end = w_m + # x_p_m = xin_m[:, :, i_m_start:i_m_end, j_m_start:j_m_end] + # + # else: + # i_m_start = h_m * 1 // 2 + # i_m_end = h_m + # j_m_start = j - w_m//4 + # j_m_end = j + w_m//4 + # x_p_m = xin_m[:, :, i_m_start:i_m_end, j_m_start:j_m_end] + # else: + # if j < w_m // 4: + # i_m_start = i - h_m//4 + # i_m_end = i + h_m//4 + # j_m_start = 0 + # j_m_end = w_m // 2 + # x_p_m = xin_m[:, :, i_m_start:i_m_end, j_m_start:j_m_end] + # + # elif j >= w_m * 3 // 4 - 1: + # i_m_start = i - h_m//4 + # i_m_end = i + h_m//4 + # j_m_start = w_m * 1 // 2 + # j_m_end = w_m + # x_p_m = xin_m[:, :, i_m_start:i_m_end, j_m_start:j_m_end] + # else: + # i_m_start = i - h_m//4 + # i_m_end = i + h_m//4 + # j_m_start = j - w_m//4 + # j_m_end = j + w_m//4 + # x_p_m = xin_m[:, :, i_m_start:i_m_end, j_m_start:j_m_end] + # print('x_p_m patch: ', x_p_m.shape) # [1, 3, 64, 64] 256/4跨度为H/4 + # x_p_m = self.conv1_p(x_p_m) + # x_p_m = self.bn1_p(x_p_m) + # x_p_m = self.relu(x_p_m) + # + # x_p_m = self.conv2_p(x_p_m) + # x_p_m = self.bn2_p(x_p_m) + # x_p_m = self.relu(x_p_m) + # + # x_p_m = self.conv3_p(x_p_m) + # x_p_m = self.bn3_p(x_p_m) + # x_p_m = self.relu(x_p_m) + # + # x1_p_m = self.layer1_p(x_p_m) + # x2_p_m = self.layer2_p(x1_p_m) + # # print(x2.shape) + # x3_p_m = self.layer3_p(x2_p_m) + # # # print(x3.shape) + # x4_p_m = self.layer4_p(x3_p_m) + # + # x_p_m = F.relu(F.interpolate(self.decoder1_p(x4_p_m), scale_factor=(2, 2), mode='bilinear')) + # x_p_m = torch.add(x_p_m, x4_p_m) + # x_p_m = F.relu(F.interpolate(self.decoder2_p(x_p_m), scale_factor=(2, 2), 
mode='bilinear')) + # x_p_m = torch.add(x_p_m, x3_p_m) + # x_p_m = F.relu(F.interpolate(self.decoder3_p(x_p_m), scale_factor=(2, 2), mode='bilinear')) + # x_p_m = torch.add(x_p_m, x2_p_m) + # x_p_m = F.relu(F.interpolate(self.decoder4_p(x_p_m), scale_factor=(2, 2), mode='bilinear')) + # x_p_m = torch.add(x_p_m, x1_p_m) + # x_p_m = F.relu(F.interpolate(self.decoder5_p(x_p_m), scale_factor=(2, 2), mode='bilinear')) + # x_loc_m[:, :, i_m_start:i_m_end, j_m_start:j_m_end] = x_p_m + # print('i, j: ', i, j) + # x_loc_m = F.interpolate(x_loc_m, scale_factor=(2, 2), mode='bilinear') # 上采样 + # + # + # + # xin_l = self.maxpool(xin_l) + # xin_l = self.maxpool(xin_l) + # h_l = xin_l.shape[2] + # w_l = xin_l.shape[3] + # i_l_start = 0 + # i_l_end = 0 + # j_l_start = 0 + # j_l_end = 0 + # for i in range(0, h_l): + # for j in range(0, w_l): + # i_l_start = 0 + # i_l_end = h_l + # j_l_start = 0 + # j_l_end = w_l + # x_p_l = xin_l[:, :, i_l_start:i_l_end, j_l_start:j_l_end] + # x_p_l = self.conv1_p(x_p_l) + # x_p_l = self.bn1_p(x_p_l) + # x_p_l = self.relu(x_p_l) + # + # x_p_l = self.conv2_p(x_p_l) + # x_p_l = self.bn2_p(x_p_l) + # x_p_l = self.relu(x_p_l) + # + # x_p_l = self.conv3_p(x_p_l) + # x_p_l = self.bn3_p(x_p_l) + # x_p_l = self.relu(x_p_l) + # + # x1_p_l = self.layer1_p(x_p_l) + # x2_p_l = self.layer2_p(x1_p_l) + # # print(x2.shape) + # x3_p_l = self.layer3_p(x2_p_l) + # # # print(x3.shape) + # x4_p_l = self.layer4_p(x3_p_l) + # + # x_p_l = F.relu(F.interpolate(self.decoder1_p(x4_p_l), scale_factor=(2, 2), mode='bilinear')) + # x_p_l = torch.add(x_p_l, x4_p_l) + # x_p_l = F.relu(F.interpolate(self.decoder2_p(x_p_l), scale_factor=(2, 2), mode='bilinear')) + # x_p_l = torch.add(x_p_l, x3_p_l) + # x_p_l = F.relu(F.interpolate(self.decoder3_p(x_p_l), scale_factor=(2, 2), mode='bilinear')) + # x_p_l = torch.add(x_p_l, x2_p_l) + # x_p_l = F.relu(F.interpolate(self.decoder4_p(x_p_l), scale_factor=(2, 2), mode='bilinear')) + # x_p_l = torch.add(x_p_l, x1_p_l) + # x_p_l = F.relu(F.interpolate(self.decoder5_p(x_p_l), scale_factor=(2, 2), mode='bilinear')) + # x_loc_l[:, :, i_l_start:i_l_end, j_l_start:j_l_end] = x_p_l + # x_loc_l = F.interpolate(x_loc_l, scale_factor=(2, 2), mode='bilinear') # 上采样 + # x_loc_l = F.interpolate(x_loc_l, scale_factor=(2, 2), mode='bilinear') # 上采样 + # # print('x_loc_s.shape: ', x_loc_s.shape) # [1, 3, 256, 256] + + x_loc_s = x.clone() + x_loc_l = x.clone() + x_loc_m = x.clone() + + h_1 = x_loc_s.shape[2] + w_1 = x_loc_s.shape[3] + for i in range(0, 4): + for j in range(0, 4): + x_p_s_1 = xin_s[:, :, h_1//4 * i:h_1//4 * (i + 1), w_1//4 * j:w_1//4 * (j + 1)] + + # print('x_p shape: ', x_p.shape) # [1, 3, 32, 32] + # begin patch wise + x_p_s_1 = self.conv1_p(x_p_s_1) # imgchans-> inplans + x_p_s_1 = self.bn1_p(x_p_s_1) + # x = F.max_pool2d(x,2,2) + x_p_s_1 = self.relu(x_p_s_1) + + x_p_s_1 = self.conv2_p(x_p_s_1) + x_p_s_1 = self.bn2_p(x_p_s_1) + # x = F.max_pool2d(x,2,2) + x_p_s_1 = self.relu(x_p_s_1) + x_p_s_1 = self.conv3_p(x_p_s_1) + x_p_s_1 = self.bn3_p(x_p_s_1) + # x = F.max_pool2d(x,2,2) + x_p_s_1 = self.relu(x_p_s_1) + + # x = self.maxpool(x) + # pdb.set_trace() + x1_p_s_1 = self.layer1_p(x_p_s_1) + # print('x1_p shape: ', x1_p_s_1.shape) + x2_p_s_1 = self.layer2_p(x1_p_s_1) + # print('x2_p shape: ', x2_p_s_1.shape) + x3_p_s_1 = self.layer3_p(x2_p_s_1) + # print('x3_p shape: ', x3_p_s_1.shape) + x4_p_s_1 = self.layer4_p(x3_p_s_1) + + x_p_s_1 = F.relu(F.interpolate(self.decoder1_p(x4_p_s_1), scale_factor=(2, 2), mode='bilinear')) + # print('x_p shape: ', x_p_s_1.shape) + 
x_p_s_1 = torch.add(x_p_s_1, x4_p_s_1)
+                x_p_s_1 = F.relu(F.interpolate(self.decoder2_p(x_p_s_1), scale_factor=(2, 2), mode='bilinear'))
+                x_p_s_1 = torch.add(x_p_s_1, x3_p_s_1)
+                x_p_s_1 = F.relu(F.interpolate(self.decoder3_p(x_p_s_1), scale_factor=(2, 2), mode='bilinear'))
+                x_p_s_1 = torch.add(x_p_s_1, x2_p_s_1)
+                x_p_s_1 = F.relu(F.interpolate(self.decoder4_p(x_p_s_1), scale_factor=(2, 2), mode='bilinear'))
+                x_p_s_1 = torch.add(x_p_s_1, x1_p_s_1)
+                x_p_s_1 = F.relu(F.interpolate(self.decoder5_p(x_p_s_1), scale_factor=(2, 2), mode='bilinear'))
+
+                x_loc_s[:, :, h_1 // 4 * i:h_1 // 4 * (i + 1), w_1 // 4 * j:w_1 // 4 * (j + 1)] = x_p_s_1
+
+        # Medium-scale branch: patches are cropped from (and written back to) the whole map,
+        # so xin_m must be downsampled first; the stitched result is interpolated back up for fusion.
+        x_loc_m = self.maxpool(x_loc_m)
+        xin_m = self.maxpool(xin_m)
+        h_m = x_loc_m.shape[2]
+        w_m = x_loc_m.shape[3]
+        for i in range(0, 2):
+            for j in range(0, 2):
+                x_p_m_1 = xin_m[:, :, h_m // 2 * i:h_m // 2 * (i + 1), w_m // 2 * j:w_m // 2 * (j + 1)]
+                # begin patch-wise processing with the shared local stem
+                x_p_m_1 = self.conv1_p(x_p_m_1)  # imgchan -> inplanes
+                x_p_m_1 = self.bn1_p(x_p_m_1)
+                x_p_m_1 = self.relu(x_p_m_1)
+
+                x_p_m_1 = self.conv2_p(x_p_m_1)
+                x_p_m_1 = self.bn2_p(x_p_m_1)
+                x_p_m_1 = self.relu(x_p_m_1)
+                x_p_m_1 = self.conv3_p(x_p_m_1)
+                x_p_m_1 = self.bn3_p(x_p_m_1)
+                x_p_m_1 = self.relu(x_p_m_1)
+
+                x1_p_m_1 = self.layer1_p(x_p_m_1)
+                x2_p_m_1 = self.layer2_p(x1_p_m_1)
+                x3_p_m_1 = self.layer3_p(x2_p_m_1)
+                x4_p_m_1 = self.layer4_p(x3_p_m_1)
+
+                x_p_m_1 = F.relu(F.interpolate(self.decoder1_p(x4_p_m_1), scale_factor=(2, 2), mode='bilinear'))
+                x_p_m_1 = torch.add(x_p_m_1, x4_p_m_1)
+                x_p_m_1 = F.relu(F.interpolate(self.decoder2_p(x_p_m_1), scale_factor=(2, 2), mode='bilinear'))
+                x_p_m_1 = torch.add(x_p_m_1, x3_p_m_1)
+                x_p_m_1 = F.relu(F.interpolate(self.decoder3_p(x_p_m_1), scale_factor=(2, 2), mode='bilinear'))
+                x_p_m_1 = torch.add(x_p_m_1, x2_p_m_1)
+                x_p_m_1 = F.relu(F.interpolate(self.decoder4_p(x_p_m_1), scale_factor=(2, 2), mode='bilinear'))
+                x_p_m_1 = torch.add(x_p_m_1, x1_p_m_1)
+                x_p_m_1 = F.relu(F.interpolate(self.decoder5_p(x_p_m_1), scale_factor=(2, 2), mode='bilinear'))
+
+                x_loc_m[:, :, h_m // 2 * i:h_m // 2 * (i + 1), w_m // 2 * j:w_m // 2 * (j + 1)] = x_p_m_1
+        x_loc_m = F.interpolate(x_loc_m, scale_factor=(2, 2), mode='bilinear')  # upsample back
+
+        # Large-scale branch: the whole (downsampled) image is a single patch.
+        xin_l = self.maxpool(xin_l)
+        xin_l = self.maxpool(xin_l)      # downsample xin_l so the large patch matches the shared patch size
+        x_loc_l = self.maxpool(x_loc_l)  # pool the write-back buffer the same way so channels/resolution match
+        x_loc_l = self.maxpool(x_loc_l)
+        h_l = x_loc_l.shape[2]
+        w_l = x_loc_l.shape[3]
+
+        x_p_l_1 = xin_l[:, :, :, :]
+
+        # begin patch-wise processing with the shared local stem
+        x_p_l_1 = self.conv1_p(x_p_l_1)  # imgchan -> inplanes
+        x_p_l_1 = self.bn1_p(x_p_l_1)
+        x_p_l_1 = self.relu(x_p_l_1)
+
+        x_p_l_1 = self.conv2_p(x_p_l_1)
+        x_p_l_1 = self.bn2_p(x_p_l_1)
+        x_p_l_1 = self.relu(x_p_l_1)
+        x_p_l_1 = self.conv3_p(x_p_l_1)
+        x_p_l_1 = self.bn3_p(x_p_l_1)
+        x_p_l_1 = self.relu(x_p_l_1)
+
+        x1_p_l_1 = self.layer1_p(x_p_l_1)
+        x2_p_l_1 = self.layer2_p(x1_p_l_1)
+        x3_p_l_1 = self.layer3_p(x2_p_l_1)
+        x4_p_l_1 = self.layer4_p(x3_p_l_1)
+
+        x_p_l_1 = F.relu(F.interpolate(self.decoder1_p(x4_p_l_1), scale_factor=(2, 2), mode='bilinear'))
+        x_p_l_1 = torch.add(x_p_l_1, x4_p_l_1)
+        x_p_l_1 = F.relu(F.interpolate(self.decoder2_p(x_p_l_1), scale_factor=(2, 2), mode='bilinear'))
+        x_p_l_1 = torch.add(x_p_l_1, x3_p_l_1)
+        x_p_l_1 = F.relu(F.interpolate(self.decoder3_p(x_p_l_1), scale_factor=(2, 2), mode='bilinear'))
+        x_p_l_1 = torch.add(x_p_l_1, x2_p_l_1)
+        x_p_l_1 = F.relu(F.interpolate(self.decoder4_p(x_p_l_1), scale_factor=(2, 2), mode='bilinear'))
+        x_p_l_1 = torch.add(x_p_l_1, x1_p_l_1)
+        x_p_l_1 = F.relu(F.interpolate(self.decoder5_p(x_p_l_1), scale_factor=(2, 2), mode='bilinear'))
+
+        x_loc_l[:, :, :, :] = x_p_l_1
+        x_loc_l = F.interpolate(x_loc_l, scale_factor=(2, 2), mode='bilinear')  # upsample back (twice)
+        x_loc_l = F.interpolate(x_loc_l, scale_factor=(2, 2), mode='bilinear')
+
+        # All three scales follow the same procedure; fuse them once they share one resolution.
+        # (Legacy fusion kept for reference -- note torch.add takes two tensors, so the
+        # three-argument form below never worked:
+        # x_out = torch.add(x_loc_s, x_loc_l, x_loc_m)
+        # x_out = F.relu(self.decoderf(x_out))
+        # x_out = self.adjust(F.relu(x_out)))
+        x = torch.add(x_loc_m, x_loc_l)
+        x = torch.add(x, x_loc_s)
+        x = F.relu(self.decoderf(x))  # int(128 * s) -> int(128 * s)
+
+        x = self.adjust(F.relu(x))  # int(128 * s) -> num_classes
+
+        return x
+
+    def forward(self, x):
+        return self._forward_impl(x)
+
+
+def mylogo(pretrained=False, **kwargs):
+    model = medt_net(AxialBlock, AxialBlock, [1, 2, 4, 1], s=0.125, **kwargs)
+    return model
\ No newline at end of file
diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/resnet.py b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/resnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..2017ce5f18a0078da879eb2d788faa9b2bfd43df
--- /dev/null
+++ b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/resnet.py
@@ -0,0 +1,287 @@
+import torch
+import torch.nn as nn
+# used by _resnet below; the original file referenced this helper without importing it
+from torch.hub import load_state_dict_from_url
+
+
+__all__ = ['ResNet', 'resnet26', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
+           'resnet152']
+
+
+model_urls = {
+    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
+    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
+    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
+    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
+    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
+    'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
+    'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
+    'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
+    'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
+}
+
+
+def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
+    """3x3 convolution with padding"""
+    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
+                     padding=dilation, groups=groups, bias=False, dilation=dilation)
+
+
+def 
conv1x1(in_planes, out_planes, stride=1): + """1x1 convolution""" + return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None): + super(BasicBlock, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + if groups != 1 or base_width != 64: + raise ValueError('BasicBlock only supports groups=1 and base_width=64') + if dilation > 1: + raise NotImplementedError("Dilation > 1 not supported in BasicBlock") + # Both self.conv1 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = norm_layer(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = norm_layer(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2) + # while original implementation places the stride at the first 1x1 convolution(self.conv1) + # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385. + # This variant is also known as ResNet V1.5 and improves accuracy according to + # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch. + + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None): + super(Bottleneck, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + width = int(planes * (base_width / 64.)) * groups + # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv1x1(inplanes, width) + self.bn1 = norm_layer(width) + self.conv2 = conv3x3(width, width, stride, groups, dilation) + self.bn2 = norm_layer(width) + self.conv3 = conv1x1(width, planes * self.expansion) + self.bn3 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class ResNet(nn.Module): + + def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, + groups=1, width_per_group=64, replace_stride_with_dilation=None, + norm_layer=None): + super(ResNet, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + self._norm_layer = norm_layer + + self.inplanes = 64 + self.dilation = 1 + if replace_stride_with_dilation is None: + # each element in the tuple indicates if we should replace + # the 2x2 stride with a dilated convolution instead + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got 
{}".format(replace_stride_with_dilation)) + self.groups = groups + self.base_width = width_per_group + self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + self.bn1 = norm_layer(self.inplanes) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2, + dilate=replace_stride_with_dilation[0]) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2, + dilate=replace_stride_with_dilation[1]) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2, + dilate=replace_stride_with_dilation[2]) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = nn.Linear(512 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + # Zero-initialize the last BN in each residual branch, + # so that the residual branch starts with zeros, and each residual block behaves like an identity. + # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 + if zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + nn.init.constant_(m.bn3.weight, 0) + elif isinstance(m, BasicBlock): + nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, stride=1, dilate=False): + norm_layer = self._norm_layer + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample, self.groups, + self.base_width, previous_dilation, norm_layer)) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, groups=self.groups, + base_width=self.base_width, dilation=self.dilation, + norm_layer=norm_layer)) + + return nn.Sequential(*layers) + + def _forward_impl(self, x): + # See note [TorchScript super()] + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.avgpool(x) + x = torch.flatten(x, 1) + x = self.fc(x) + + return x + + def forward(self, x): + return self._forward_impl(x) + + +def _resnet(arch, block, layers, pretrained, progress, **kwargs): + model = ResNet(block, layers, **kwargs) + if pretrained: + state_dict = load_state_dict_from_url(model_urls[arch], + progress=progress) + model.load_state_dict(state_dict) + return model + + +def resnet18(pretrained=False, progress=True, **kwargs): + r"""ResNet-18 model from + `"Deep Residual Learning for Image Recognition" `_ + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, + **kwargs) + + +def resnet34(pretrained=False, progress=True, **kwargs): + r"""ResNet-34 model from + `"Deep Residual Learning for Image Recognition" `_ + Args: + pretrained (bool): If True, returns a model pre-trained on 
ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, + **kwargs) + + +def resnet26(pretrained=False, progress=True, **kwargs): + return _resnet('resnet26', Bottleneck, [1, 2, 4, 1], pretrained, progress, + **kwargs) + + +def resnet50(pretrained=False, progress=True, **kwargs): + r"""ResNet-50 model from + `"Deep Residual Learning for Image Recognition" `_ + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, + **kwargs) + + +def resnet101(pretrained=False, progress=True, **kwargs): + r"""ResNet-101 model from + `"Deep Residual Learning for Image Recognition" `_ + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, + **kwargs) + + +def resnet152(pretrained=False, progress=True, **kwargs): + r"""ResNet-152 model from + `"Deep Residual Learning for Image Recognition" `_ + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, + **kwargs) diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/utils.py b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f0ac0f62c2fa7a98bf56d98b9a9168eb0f9a15fe --- /dev/null +++ b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/models/utils.py @@ -0,0 +1,6 @@ +import torch.nn as nn + + +class qkv_transform(nn.Conv1d): + """Conv1d for qkv_transform""" + diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/utils.py b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e6093bdaed50a81a88ea6a9d3d1842ad60378f04 --- /dev/null +++ b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/lib/utils.py @@ -0,0 +1,167 @@ +import math +import os +import torch +import torch.nn.functional as F + + +def adjust_learning_rate(args, optimizer, epoch, batch_idx, data_nums, type="cosine"): + if epoch < args.warmup_epochs: + epoch += float(batch_idx + 1) / data_nums + lr_adj = 1. * (epoch / args.warmup_epochs) + elif type == "linear": + if epoch < 30 + args.warmup_epochs: + lr_adj = 1. 
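+        # Worked example (illustrative): with args.lr = 0.1 and args.warmup_epochs = 5,
+        # the "linear" (stepwise) schedule yields 0.1 for epochs [5, 35), 0.01 for [35, 65),
+        # 0.001 for [65, 95) and 0.0001 afterwards, while warmup ramps the multiplier
+        # from ~0 to 1 linearly over the first 5 epochs.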
+ elif epoch < 60 + args.warmup_epochs: + lr_adj = 1e-1 + elif epoch < 90 + args.warmup_epochs: + lr_adj = 1e-2 + else: + lr_adj = 1e-3 + elif type == "cosine": + run_epochs = epoch - args.warmup_epochs + total_epochs = args.epochs - args.warmup_epochs + T_cur = float(run_epochs * data_nums) + batch_idx + T_total = float(total_epochs * data_nums) + + lr_adj = 0.5 * (1 + math.cos(math.pi * T_cur / T_total)) + + for param_group in optimizer.param_groups: + param_group['lr'] = args.lr * lr_adj + return args.lr * lr_adj + + +def label_smoothing(pred, target, eta=0.1): + ''' + Refer from https://arxiv.org/pdf/1512.00567.pdf + :param target: N, + :param n_classes: int + :param eta: float + :return: + N x C onehot smoothed vector + ''' + n_classes = pred.size(1) + target = torch.unsqueeze(target, 1) + onehot_target = torch.zeros_like(pred) + onehot_target.scatter_(1, target, 1) + return onehot_target * (1 - eta) + eta / n_classes * 1 + + +def cross_entropy_for_onehot(pred, target): + return torch.mean(torch.sum(- target * F.log_softmax(pred, dim=-1), 1)) + + +def cross_entropy_with_label_smoothing(pred, target, eta=0.1): + onehot_target = label_smoothing(pred, target, eta=eta) + return cross_entropy_for_onehot(pred, onehot_target) + + +def accuracy(output, target): + # get the index of the max log-probability + pred = output.max(1, keepdim=True)[1] + return pred.eq(target.view_as(pred)).cpu().float().mean() + + +def save_model(model, optimizer, epoch, args): + os.system('mkdir -p {}'.format(args.work_dirs)) + if optimizer is not None: + torch.save({ + 'net': model.state_dict(), + 'optim': optimizer.state_dict(), + 'epoch': epoch + }, os.path.join(args.work_dirs, '{}.pth'.format(epoch))) + else: + torch.save({ + 'net': model.state_dict(), + 'epoch': epoch + }, os.path.join(args.work_dirs, '{}.pth'.format(epoch))) + + +def dist_save_model(model, optimizer, epoch, ngpus_per_node, args): + if not args.multiprocessing_distributed or (args.multiprocessing_distributed + and args.rank % ngpus_per_node == 0): + os.system('mkdir -p {}'.format(args.work_dirs)) + if optimizer is not None: + torch.save({ + 'net': model.state_dict(), + 'optim': optimizer.state_dict(), + 'epoch': epoch + }, os.path.join(args.work_dirs, '{}.pth'.format(epoch))) + else: + torch.save({ + 'net': model.state_dict(), + 'epoch': epoch + }, os.path.join(args.work_dirs, '{}.pth'.format(epoch))) + + +def load_model(network, args): + if not os.path.exists(args.work_dirs): + print("No such working directory!") + raise AssertionError + + pths = [pth.split('.')[0] for pth in os.listdir(args.work_dirs) if 'pth' in pth] + if len(pths) == 0: + print("No model to load!") + raise AssertionError + + pths = [int(pth) for pth in pths] + if args.test_model == -1: + pth = -1 + if pth in pths: + pass + else: + pth = max(pths) + else: + pth = args.test_model + try: + if args.distributed: + loc = 'cuda:{}'.format(args.gpu) + model = torch.load(os.path.join(args.work_dirs, '{}.pth'.format(pth)), map_location=loc) + except: + model = torch.load(os.path.join(args.work_dirs, '{}.pth'.format(pth))) + try: + network.load_state_dict(model['net'], strict=True) + except: + network.load_state_dict(convert_model(model['net']), strict=True) + return True + + +def resume_model(network, optimizer, args): + print("Loading the model...") + if not os.path.exists(args.work_dirs): + print("No such working directory!") + return 0 + pths = [pth.split('.')[0] for pth in os.listdir(args.work_dirs) if 'pth' in pth] + if len(pths) == 0: + print("No model to load!") + return 0 + 
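+    # checkpoints are written as '<epoch>.pth' by save_model above, so each file
+    # stem parses to an int epoch, e.g. '12.pth' -> 12, and max(pths) is the latest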
pths = [int(pth) for pth in pths] + if args.test_model == -1: + pth = max(pths) + else: + pth = args.test_model + try: + if args.distributed: + loc = 'cuda:{}'.format(args.gpu) + model = torch.load(os.path.join(args.work_dirs, '{}.pth'.format(pth)), map_location=loc) + except: + model = torch.load(os.path.join(args.work_dirs, '{}.pth'.format(pth))) + try: + network.load_state_dict(model['net'], strict=True) + except: + network.load_state_dict(convert_model(model['net']), strict=True) + optimizer.load_state_dict(model['optim']) + for state in optimizer.state.values(): + for k, v in state.items(): + if torch.is_tensor(v): + try: + state[k] = v.cuda(args.gpu) + except: + state[k] = v.cuda() + return model['epoch'] + + +def convert_model(model): + new_model = {} + for k in model.keys(): + new_model[k[7:]] = model[k] + return new_model diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/metrics.py b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..676be8dea1c639e48b8d9e6d31c7a931ce613458 --- /dev/null +++ b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/metrics.py @@ -0,0 +1,235 @@ +import torch +from torch.nn.functional import cross_entropy +from torch.nn.modules.loss import _WeightedLoss + +import numpy as np +import torch.nn as nn + + +EPSILON = 1e-32 + + +class LogNLLLoss(_WeightedLoss): + __constants__ = ['weight', 'reduction', 'ignore_index'] + + def __init__(self, weight=None, size_average=None, reduce=None, reduction=None, + ignore_index=-100): + super(LogNLLLoss, self).__init__(weight, size_average, reduce, reduction) + self.ignore_index = ignore_index + + def forward(self, y_input, y_target): + # y_input = torch.log(y_input + EPSILON) + return cross_entropy(y_input, y_target, weight=self.weight, + ignore_index=self.ignore_index) + + +def classwise_iou(output, gt): + """ + Args: + output: torch.Tensor of shape (n_batch, n_classes, image.shape) + gt: torch.LongTensor of shape (n_batch, image.shape) + """ + dims = (0, *range(2, len(output.shape))) + gt = torch.zeros_like(output).scatter_(1, gt[:, None, :], 1) + intersection = output*gt + union = output + gt - intersection + classwise_iou = (intersection.sum(dim=dims).float() + EPSILON) / (union.sum(dim=dims) + EPSILON) + + return classwise_iou + + +def classwise_f1(output, gt): + """ + Args: + output: torch.Tensor of shape (n_batch, n_classes, image.shape) + gt: torch.LongTensor of shape (n_batch, image.shape) + """ + + epsilon = 1e-20 + n_classes = output.shape[1] + + output = torch.argmax(output, dim=1) + true_positives = torch.tensor([((output == i) * (gt == i)).sum() for i in range(n_classes)]).float() + selected = torch.tensor([(output == i).sum() for i in range(n_classes)]).float() + relevant = torch.tensor([(gt == i).sum() for i in range(n_classes)]).float() + + precision = (true_positives + epsilon) / (selected + epsilon) + recall = (true_positives + epsilon) / (relevant + epsilon) + classwise_f1 = 2 * (precision * recall) / (precision + recall) + + return classwise_f1 + + +def make_weighted_metric(classwise_metric): + """ + Args: + classwise_metric: classwise metric like classwise_IOU or classwise_F1 + """ + + def weighted_metric(output, gt, weights=None): + + # dimensions to sum over + dims = (0, *range(2, len(output.shape))) + + # default weights + if weights == None: + weights = torch.ones(output.shape[1]) / output.shape[1] + else: + # creating 
tensor if needed
+            if len(weights) != output.shape[1]:
+                raise ValueError("The number of weights must match with the number of classes")
+            if not isinstance(weights, torch.Tensor):
+                weights = torch.tensor(weights)
+            # normalizing weights
+            weights /= torch.sum(weights)
+
+        classwise_scores = classwise_metric(output, gt).cpu()
+
+        # combine the per-class scores with the normalized weights
+        return (classwise_scores * weights).sum()
+
+    return weighted_metric
+
+
+jaccard_index = make_weighted_metric(classwise_iou)
+f1_score = make_weighted_metric(classwise_f1)
+
+
+class BinaryMetrics():
+    """
+    Compute common metrics for binary segmentation, including overlap metrics, distance metrics and MAE
+    NOTE: batch size must be set to one for accurate measurement, a batch size larger than one may cause errors!
+    """
+    def __init__(self, eps=1e-5, resolution=(1, 1), inf_result=np.nan):
+        self.eps = eps
+        self.resolution = resolution
+        self.inf_result = inf_result
+
+    def _check_inf(self, result):
+        if result == np.inf:  # map an infinite distance to the configured placeholder
+            return self.inf_result
+        else:
+            return result
+
+    def _calculate_overlap_metrics(self, gt, pred):
+        output = pred.view(-1, )
+        target = gt.view(-1, ).float()
+
+        tp = torch.sum(output * target)  # TP
+        fp = torch.sum(output * (1 - target))  # FP
+        fn = torch.sum((1 - output) * target)  # FN
+        tn = torch.sum((1 - output) * (1 - target))  # TN
+
+        pixel_acc = (tp + tn + self.eps) / (tp + tn + fp + fn + self.eps)
+        dice = (2 * tp + self.eps) / (2 * tp + fp + fn + self.eps)
+        precision = (tp + self.eps) / (tp + fp + self.eps)
+        recall = (tp + self.eps) / (tp + fn + self.eps)
+        specificity = (tn + self.eps) / (tn + fp + self.eps)
+
+        metric_dict = dict()
+        metric_dict['pixel_acc'] = pixel_acc.item()
+        metric_dict['dice'] = dice.item()
+        metric_dict['precision'] = precision.item()
+        metric_dict['recall'] = recall.item()
+        metric_dict['specificity'] = specificity.item()
+
+        return metric_dict
+
+    def _calculate_distance_metrics(self, gt, pred):
+        # shape: (N, C, H, W); plain int/bool replace np.int/np.bool, which were removed from NumPy
+        gt_class = gt[0, ...].cpu().numpy().astype(int).astype(bool)  # (H, W)
+        pred_class = pred[0, 0, ...].cpu().numpy().astype(int).astype(bool)  # (H, W)
+        # surface_distance_dict = compute_surface_distances(gt_class, pred_class, self.resolution)
+        # distances = surface_distance_dict['distances_pred_to_gt']
+        # mean_surface_distance = self._check_inf(np.mean(distances))
+
+        # compute Hausdorff distance (95 percentile)
+        # hd95 = self._check_inf(compute_robust_hausdorff(surface_distance_dict, percent=95))
+
+        metric_dict = dict()
+        # metric_dict['mean_surface_distance'] = mean_surface_distance
+        # metric_dict['hd95'] = hd95
+
+        return metric_dict
+
+    def _calculate_mae(self, gt, pred):
+        # shape: (N, C, H, W)
+        residual = torch.abs(gt.unsqueeze(1) - pred)
+        mae = torch.mean(residual, dim=(2, 3)).squeeze().detach().cpu().numpy()
+
+        metric_dict = dict()
+        metric_dict['mae'] = mae
+        return metric_dict
+
+    def __call__(self, y_true, y_pred):
+        # y_true: (N, H, W)
+        # y_pred: (N, 1, H, W)
+        sigmoid_pred = nn.Sigmoid()(y_pred)
+        class_pred = (sigmoid_pred > 0.5).float().to(y_pred.device)
+
+        assert class_pred.shape[1] == 1, 'Predictions must contain only one channel' \
+                                         ' when performing binary segmentation'
+
+        overlap_metrics = self._calculate_overlap_metrics(y_true.to(y_pred.device, dtype=torch.float), class_pred)
+        distance_metrics = self._calculate_distance_metrics(y_true, class_pred)
+        mae = self._calculate_mae(y_true, sigmoid_pred)
+
+        metrics = {**overlap_metrics, **distance_metrics, **mae}
+
+        return metrics
+
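+
+# Hedged usage sketch (not part of the original file): BinaryMetrics expects the
+# ground truth as an (N, H, W) tensor and raw logits as an (N, 1, H, W) tensor,
+# with N = 1 as the class docstring advises. For example:
+#   gt = torch.randint(0, 2, (1, 64, 64))
+#   logits = torch.randn(1, 1, 64, 64)
+#   print(BinaryMetrics()(gt, logits)['dice'])
+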
+class MetricMeter(object):
+    """
+    Metric recorder: keeps a running list of values for each tracked metric
+    """
+    def __init__(self, metrics):
+        self.metrics = metrics
+        self.initialization()
+
+    def initialization(self):
+        for metric in self.metrics:
+            exec('self.' + metric + '=[]')
+
+    def update(self, metric_dict):
+        """
+        Push a new metric dict into the recorder and update its records
+        :param metric_dict: dict of metric values
+        :return: None
+        """
+        for metric_key, metric_value in metric_dict.items():
+            try:
+                exec('self.' + metric_key + '.append(metric_value)')
+                # exec executes Python statements held in a string (unlike eval, which
+                # only evaluates expressions), so it can append to the per-metric list
+                # attribute named by metric_key
+            except:
+                continue
+
+    def report(self, print_stats=True):
+        """
+        Report the statistics recorded so far
+        :param print_stats: whether to also print the stats to the terminal
+        :return: report_str
+        """
+        report_str = ''
+        for metric in self.metrics:
+            metric_mean = np.nanmean(eval('self.' + metric), axis=0)  # arithmetic mean along the axis, ignoring NaN
+            metric_std = np.nanstd(eval('self.' + metric), axis=0)
+            if print_stats:
+                stats = metric + ': {} ± {};'.format(np.around(metric_mean, decimals=4),  # rounded to 4 decimal places
+                                                     np.around(metric_std, decimals=4))
+                print(stats, end=' ')
+                report_str += stats
+        return report_str
+
+
+
+
+if __name__ == '__main__':
+    output, gt = torch.zeros(3, 2, 5, 5), torch.zeros(3, 5, 5).long()
+    print(classwise_iou(output, gt))
diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/performancemetrics_ax.m b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/performancemetrics_ax.m
new file mode 100644
index 0000000000000000000000000000000000000000..75218d914993902716511c3e0a56e7883f025e00
--- /dev/null
+++ b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/performancemetrics_ax.m
@@ -0,0 +1,100 @@
+
+% close all;
+% clear all;
+% clc;
+N = 328
+st = 0;
+Fsc=[];
+MIU=[];
+PA=[];
+bestfsc=0;
+bestmiu=0;
+bestpa=0;
+bestep = 0;
+
+for k = 0:8
+    k
+    Fsc=[];
+    MIU=[];
+    PA=[];
+for i = st:st+N
+    i;
+    %gname = strcat('./Brain_test/',num2str(i,'%04d'),'.png');
+
+    tname = '/media/jeyamariajose/7888230b-5c10-4229-90f2-c78bdae9c5de/Data/Projects/axialseg/KiU-Net-pytorch/results/brainus/mix_3_gated_wopos/';
+    imgname = strcat(tname,num2str(50*k),'/',num2str(i,'%04d'),'.png');
+    lname = '/media/jeyamariajose/7888230b-5c10-4229-90f2-c78bdae9c5de/Data/Brain_Ultrasound/Final/resized/test/labelcol/';
+    labelname = strcat(lname, num2str(i,'%04d'),'.png');
+
+    I = double(imread(imgname));tmp2=zeros(128,128);
+    tmp2(I>131) = 255;
+    tmp2(I<130) = 0;
+    tmp = double(imread(labelname));
+    tmp = tmp(:,:,1);
+    tmp(tmp<130)=0;tmp(tmp>131)=255;
+
+    tp=0;fp=0;fn=0;tn=0;uni=0;ttp=0;lab=0;
+
+    for p =1:128
+        for q =1:128
+            if tmp(p,q)==0
+                if tmp2(p,q) == tmp(p,q)
+                    tn = tn+1;
+                else
+                    fp = fp+1;
+                    uni = uni+1;
+                    ttp = ttp+1;
+                end
+            elseif tmp(p,q)==255
+                lab = lab +1;
+                if tmp2(p,q) == tmp(p,q)
+                    tp = tp+1;
+                    ttp = ttp+1;
+                else
+                    fn = fn+1;
+                end
+                uni = uni+1;
+            end
+
+        end
+    end
+
+    if (tp~=0)
+        F = (2*tp)/(2*tp+fp+fn);
+        MIU=[MIU,(tp*1.0/uni)];
+        PA=[PA,(tp*1.0/ttp)];
+        Fsc=[Fsc;[i,F]];
+    else
+        MIU=[MIU,1];
+        PA=[PA,1];
+        Fsc=[Fsc;[i,1]];
+
+    end
+
+
+
+end
+    if bestfsc <= mean(Fsc) & (mean(Fsc) ~= 1)
+        bestfsc = mean(Fsc);
+        bestmiu = mean(MIU,2);
+        bestpa = mean(PA,2);
+        bestep = 50*k;
+
+    end
+    mean(Fsc)
+end
+
+bestfsc
+bestmiu
+bestpa
+bestep
+
+% plot(Fsc(:,1),Fsc(:,2),'-*')
+% hold on
+% 
plot(Fsc(:,1),Fsc1(:,2),'-s') +% hold off +% figure();plot(Fsc(:,1),PA,'-*');hold on +% plot(Fsc(:,1),PA1,'-s');hold off +% Fsc1=Fsc; +% MIU1=MIU; +% PA1=PA; diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/performancemetrics_glas.m b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/performancemetrics_glas.m new file mode 100644 index 0000000000000000000000000000000000000000..803e3d1e559b333477cec111ea23720f8a3b831c --- /dev/null +++ b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/performancemetrics_glas.m @@ -0,0 +1,96 @@ + +% close all; +% clear all; +% clc; +N = 79 +st = 1; +Fsc=[]; +MIU=[]; +PA=[]; +bestfsc=0; +bestmiu=0; +bestpa=0; +bestep = 0; + +for k = 1:24 + k + Fsc=[]; + MIU=[]; + PA=[]; +for i = st:st+N + i; + %gname = strcat('./Brain_test/',num2str(i,'%04d'),'.png'); + + tname = '/media/jeyamariajose/7888230b-5c10-4229-90f2-c78bdae9c5de/Data/Projects/axialseg/KiU-Net-pytorch/results/glas/medT/'; + imgname = strcat(tname,num2str(50*k),'/',num2str(i,'%02d'),'.png'); + lname = '/media/jeyamariajose/7888230b-5c10-4229-90f2-c78bdae9c5de/Data/glas/resized/test/labelcol/'; + + labelname = strcat(lname, num2str(i,'%02d'),'.png'); + + I = double(imread(imgname));tmp2=zeros(128,128); + tmp2(I>130) = 255; + tmp2(I<131) = 0; + tmp = double(imread(labelname)); + tmp = tmp(:,:,1); + tmp(tmp<130)=0;tmp(tmp>131)=255; + + + + tp=0;fp=0;fn=0;tn=0;uni=0;ttp=0;lab=0; + + for p =1:128 + for q =1:128 + if tmp(p,q)==0 + if tmp2(p,q) == tmp(p,q) + tn = tn+1; + else + fp = fp+1; + uni = uni+1; + ttp = ttp+1; + end + elseif tmp(p,q)==255 + lab = lab +1; + if tmp2(p,q) == tmp(p,q) + tp = tp+1; + ttp = ttp+1; + else + fn = fn+1; + end + uni = uni+1; + end + + end + end + + + if (tp~=0) + F = (2*tp)/(2*tp+fp+fn); + MIU=[MIU,(tp*1.0/uni)]; + PA=[PA,(tp*1.0/ttp)]; + Fsc=[Fsc;[i,F]]; + + else + MIU=[MIU,1]; + PA=[PA,1]; + Fsc=[Fsc;[i,1]]; + + end + + + +end + if bestfsc <= mean(Fsc) & (mean(Fsc) ~= 1) + bestfsc = mean(Fsc); + bestmiu = mean(MIU,2); + bestpa = mean(PA,2); + bestep = 50*k; + + end + mean(Fsc) +end + +bestfsc +bestmiu +bestpa +bestep + diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/performancemetrics_monuseg.m b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/performancemetrics_monuseg.m new file mode 100644 index 0000000000000000000000000000000000000000..af574303f354788e6969b0019daab60c7350f679 --- /dev/null +++ b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/performancemetrics_monuseg.m @@ -0,0 +1,108 @@ + +% close all; +% clear all; +% clc; +N = 328 +st = 0; +Fsc=[]; +MIU=[]; +PA=[]; +bestfsc=0; +bestmiu=0; +bestpa=0; +bestep = 0; + +folder = '/media/jeyamariajose/7888230b-5c10-4229-90f2-c78bdae9c5de/Data/monuseg/resized/test/labelcol/'; +listinfo = dir(strcat(folder,'*.png')); +lm = length(listinfo); + + +for k = 1:10 + k + Fsc=[]; + MIU=[]; + PA=[]; +for i = 1:lm + %I = double(imread(strcat(folder,listinfo(i).name))); + imgfile = strcat(folder,listinfo(i).name); + imgname = listinfo(i).name(1:27) ; + i; + %gname = strcat('./Brain_test/',num2str(i,'%04d'),'.png'); + + lname = '/media/jeyamariajose/7888230b-5c10-4229-90f2-c78bdae9c5de/Data/Projects/axialseg/KiU-Net-pytorch/results/monuseg/medTr/'; + labelname = strcat(lname, num2str(k*10),'/', imgname); + %imgname + I = double(imread(imgfile));tmp2=zeros(512,512); + %I = rgb2gray(I); + tmp2(I>127) = 255; + tmp2(I<126) = 0; + tmp = 
double(imread(labelname)); + + tmp(tmp<127)=0;tmp(tmp>126)=255; + %tmp2 = I; + tp=0;fp=0;fn=0;tn=0;uni=0;ttp=0;lab=0; + + for p =1:512 + for q =1:512 + if tmp(p,q)==0 + if tmp2(p,q) == tmp(p,q) + tn = tn+1; + else + fp = fp+1; + uni = uni+1; + ttp = ttp+1; + end + elseif tmp(p,q)==255 + lab = lab +1; + if tmp2(p,q) == tmp(p,q) + tp = tp+1; + ttp = ttp+1; + else + fn = fn+1; + end + uni = uni+1; + end + + end + end + + if (tp~=0) + F = (2*tp)/(2*tp+fp+fn); + MIU=[MIU,(tp*1.0/uni)]; + PA=[PA,(tp*1.0/ttp)]; + Fsc=[Fsc;[i,F]]; + else + MIU=[MIU,1]; + PA=[PA,1]; + Fsc=[Fsc;[i,1]]; + + end + + + +end + + if bestfsc <= mean(Fsc) & (mean(Fsc) ~= 1) + bestfsc = mean(Fsc); + bestmiu = mean(MIU,2); + bestpa = mean(PA,2); + bestep = 10*k; + + end + mean(Fsc) +end + +bestfsc +bestmiu +%bestpa +bestep + +% plot(Fsc(:,1),Fsc(:,2),'-*') +% hold on +% plot(Fsc(:,1),Fsc1(:,2),'-s') +% hold off +% figure();plot(Fsc(:,1),PA,'-*');hold on +% plot(Fsc(:,1),PA1,'-s');hold off +% Fsc1=Fsc; +% MIU1=MIU; +% PA1=PA; diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/requirements.txt b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..e5dffd903b38e80116f9ad403468f351542a5d12 --- /dev/null +++ b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/requirements.txt @@ -0,0 +1,4 @@ +torch>=1.4.0 +torchvision>=0.5.0 +scikit-learn==0.23.2 +scipy==1.5.3 diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/test.py b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/test.py new file mode 100644 index 0000000000000000000000000000000000000000..b707bd7293227e84373c97d9d6137042245c97db --- /dev/null +++ b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/test.py @@ -0,0 +1,149 @@ +import argparse +import lib +import torch +import torchvision +from torch import nn +from torch.autograd import Variable +from torch.utils.data import DataLoader +from torchvision import transforms +from torchvision.utils import save_image +from torchvision.datasets import MNIST +import torch.nn.functional as F +import os +import matplotlib.pyplot as plt +import torch.utils.data as data +from PIL import Image +import numpy as np +from torchvision.utils import save_image +import torch +import torch.nn.init as init +from utils import JointTransform2D, ImageToImage2D, Image2D +from metrics import jaccard_index, f1_score, LogNLLLoss,classwise_f1 +from utils import chk_mkdir, Logger, MetricList +import cv2 +from functools import partial +from random import randint + + +parser = argparse.ArgumentParser(description='MedT') +parser.add_argument('-j', '--workers', default=16, type=int, metavar='N', + help='number of data loading workers (default: 8)') +parser.add_argument('--epochs', default=100, type=int, metavar='N', + help='number of total epochs to run(default: 1)') +parser.add_argument('--start-epoch', default=0, type=int, metavar='N', + help='manual epoch number (useful on restarts)') +parser.add_argument('-b', '--batch_size', default=1, type=int, + metavar='N', help='batch size (default: 8)') +parser.add_argument('--learning_rate', default=1e-3, type=float, + metavar='LR', help='initial learning rate (default: 0.01)') +parser.add_argument('--momentum', default=0.9, type=float, metavar='M', + help='momentum') +parser.add_argument('--weight-decay', '--wd', default=1e-5, type=float, + 
metavar='W', help='weight decay (default: 1e-5)')
+parser.add_argument('--train_dataset', type=str)
+parser.add_argument('--val_dataset', type=str)
+parser.add_argument('--save_freq', type=int, default=5)
+parser.add_argument('--modelname', default='off', type=str,
+                    help='name of the model to load')
+parser.add_argument('--cuda', default="on", type=str,
+                    help='switch on/off cuda option (default: on)')
+
+parser.add_argument('--direc', default='./results', type=str,
+                    help='directory to save')
+parser.add_argument('--crop', type=int, default=None)
+parser.add_argument('--device', default='cuda', type=str)
+parser.add_argument('--loaddirec', default='load', type=str)
+parser.add_argument('--imgsize', type=int, default=None)
+parser.add_argument('--gray', default='no', type=str)
+args = parser.parse_args()
+
+direc = args.direc
+gray_ = args.gray
+modelname = args.modelname
+imgsize = args.imgsize
+loaddirec = args.loaddirec
+
+if gray_ == "yes":
+    from utils_gray import JointTransform2D, ImageToImage2D, Image2D
+    imgchant = 1
+else:
+    from utils import JointTransform2D, ImageToImage2D, Image2D
+    imgchant = 3
+
+if args.crop is not None:
+    crop = (args.crop, args.crop)
+else:
+    crop = None
+
+tf_train = JointTransform2D(crop=crop, p_flip=0.5, color_jitter_params=None, long_mask=True)
+tf_val = JointTransform2D(crop=crop, p_flip=0, color_jitter_params=None, long_mask=True)
+# the deterministic tf_val transform is used for both datasets in this evaluation script
+train_dataset = ImageToImage2D(args.train_dataset, tf_val)
+val_dataset = ImageToImage2D(args.val_dataset, tf_val)
+predict_dataset = Image2D(args.val_dataset)
+dataloader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
+valloader = DataLoader(val_dataset, 1, shuffle=True)
+
+device = torch.device("cuda")
+
+if modelname == "axialunet":
+    model = lib.models.axialunet(img_size = imgsize, imgchan = imgchant)
+elif modelname == "MedT":
+    model = lib.models.axialnet.MedT(img_size = imgsize, imgchan = imgchant)
+elif modelname == "gatedaxialunet":
+    model = lib.models.axialnet.gated(img_size = imgsize, imgchan = imgchant)
+elif modelname == "logo":
+    model = lib.models.axialnet.logo(img_size = imgsize, imgchan = imgchant)
+
+if torch.cuda.device_count() > 1:
+    print("Let's use", torch.cuda.device_count(), "GPUs!")
+    # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] 
on 3 GPUs + model = nn.DataParallel(model,device_ids=[0,1]).cuda() +model.to(device) + +model.load_state_dict(torch.load(loaddirec)) +model.eval() + + +for batch_idx, (X_batch, y_batch, *rest) in enumerate(valloader): + # print(batch_idx) + if isinstance(rest[0][0], str): + image_filename = rest[0][0] + else: + image_filename = '%s.png' % str(batch_idx + 1).zfill(3) + + X_batch = Variable(X_batch.to(device='cuda')) + y_batch = Variable(y_batch.to(device='cuda')) + + y_out = model(X_batch) + + tmp2 = y_batch.detach().cpu().numpy() + tmp = y_out.detach().cpu().numpy() + tmp[tmp>=0.5] = 1 + tmp[tmp<0.5] = 0 + tmp2[tmp2>0] = 1 + tmp2[tmp2<=0] = 0 + tmp2 = tmp2.astype(int) + tmp = tmp.astype(int) + + # print(np.unique(tmp2)) + yHaT = tmp + yval = tmp2 + + epsilon = 1e-20 + + del X_batch, y_batch,tmp,tmp2, y_out + + yHaT[yHaT==1] =255 + yval[yval==1] =255 + fulldir = direc+"/" + + if not os.path.isdir(fulldir): + + os.makedirs(fulldir) + + cv2.imwrite(fulldir+image_filename, yHaT[0,1,:,:]) + + + diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/train.py b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/train.py new file mode 100644 index 0000000000000000000000000000000000000000..ee511f00d771a4e1753da110eac2789b1508eaf6 --- /dev/null +++ b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/train.py @@ -0,0 +1,320 @@ +# Code for MedT + +import torch +import lib +import argparse +import torch +import torchvision +from torch import nn +from torch.autograd import Variable +from torch.utils.data import DataLoader +from torchvision import transforms +from torchvision.utils import save_image +import torch.nn.functional as F +import os +import matplotlib.pyplot as plt +import torch.utils.data as data +from PIL import Image +import numpy as np +from torchvision.utils import save_image +import torch +import torch.nn.init as init +# from utils import JointTransform2D, ImageToImage2D, Image2D +from metrics import jaccard_index, f1_score, LogNLLLoss,classwise_f1, BinaryMetrics,MetricMeter +# from utils import chk_mkdir, Logger, MetricList +import cv2 +from functools import partial +from random import randint +import timeit + +from skimage.color import gray2rgb +from torch.utils.data import Dataset +from skimage.exposure import equalize_adapthist, rescale_intensity, adjust_gamma +import re +from sklearn.model_selection import train_test_split +from torchsummary import summary +from torchvision.transforms import Compose +from HNC_ZXY.utils.data_pipeline import * + +parser = argparse.ArgumentParser(description='MedT') +parser.add_argument('-j', '--workers', default=16, type=int, metavar='N', + help='number of data loading workers (default: 8)') +parser.add_argument('--epochs', default=40, type=int, metavar='N', + help='number of total epochs to run(default: 400)') +parser.add_argument('--start-epoch', default=0, type=int, metavar='N', + help='manual epoch number (useful on restarts)') +parser.add_argument('-b', '--batch_size', default=2, type=int, + metavar='N', help='batch size (default: 1)') +parser.add_argument('--learning_rate', default=1e-3, type=float, + metavar='LR', help='initial learning rate (default: 0.001)') +parser.add_argument('--momentum', default=0.9, type=float, metavar='M', + help='momentum') +parser.add_argument('--weight-decay', '--wd', default=1e-5, type=float, + metavar='W', help='weight decay (default: 1e-5)') +# parser.add_argument('--train_dataset', required=True, type=str) +# 
parser.add_argument('--val_dataset', type=str)
+parser.add_argument('--save_freq', type=int, default=10)
+
+parser.add_argument('--modelname', default='myaxial', type=str,
+                    help='type of model')
+parser.add_argument('--cuda', default="on", type=str,
+                    help='switch on/off cuda option (default: on)')
+parser.add_argument('--aug', default='off', type=str,
+                    help='turn on img augmentation (default: off)')
+parser.add_argument('--load', default='default', type=str,
+                    help='load a pretrained model')
+parser.add_argument('--save', default='default', type=str,
+                    help='save the model')
+parser.add_argument('--direc', default='./medt', type=str,
+                    help='directory to save')
+parser.add_argument('--crop', type=int, default=512)
+parser.add_argument('--imgsize', type=int, default=512)
+parser.add_argument('--device', default='cuda', type=str)
+parser.add_argument('--gray', default='no', type=str)
+
+args = parser.parse_args()
+gray_ = args.gray
+aug = args.aug
+direc = args.direc
+modelname = args.modelname
+imgsize = args.imgsize
+
+if gray_ == "yes":  # number of input image channels
+    from utils_gray import JointTransform2D, ImageToImage2D, Image2D
+    imgchant = 1
+else:
+    from utils import JointTransform2D, ImageToImage2D, Image2D
+    # from Medical_Transformer_main import utils
+    imgchant = 3
+
+if args.crop is not None:
+    crop = (args.crop, args.crop)
+else:
+    crop = None
+
+# tf_train = JointTransform2D(crop=crop, p_flip=0.5, color_jitter_params=None, long_mask=True)  # performs augmentation on image and mask when called
+# tf_val = JointTransform2D(crop=crop, p_flip=0, color_jitter_params=None, long_mask=True)
+tf_train = Compose([Rescale((512, 512)), RandomCrop((imgsize, imgsize)), ToTensor()])
+tf_val = Compose([Rescale((imgsize, imgsize)), ToTensor()])
+
+
+class SegmentationDataset(Dataset):
+    def __init__(self, image_root, mask_root, subject_list, transform=None, vis=False):
+        self.image_root = image_root
+        self.mask_root = mask_root
+        self.transform = transform
+        self.subject_list = subject_list
+        self.vis = vis
+        self.file_list = self.get_file_list()
+
+    def get_file_list(self):
+        file_list = []
+        for file in os.listdir(self.image_root):
+            patient_id = file.split('_')[1]  # 0
+            if patient_id in self.subject_list:
+                file_list.append(file)
+        return file_list
+
+    def __len__(self):
+        return len(self.file_list)
+
+    def __getitem__(self, i):
+        image_filename = os.path.join(self.image_root, self.file_list[i])
+        # /opt/zhanglab/HYF/data/BrainStem/BrainStem/images_2d/patient_0_slice_0.npy
+        mask_filename = os.path.join(self.mask_root, self.file_list[i])
+        # /opt/zhanglab/HYF/data/BrainStem/BrainStem/masks_2d/patient_0_slice_0.npy
+        data_id = self.file_list[i].split('.')[0]
+        # str.split(sep) splits the string on the given separator, so the filename stem becomes the id
+        # data_id = Patient_0_Slice_0
+
+        image = np.load(image_filename)
+        image = gray2rgb(remap_by_window(image, window_width=80, window_level=1035))  # restore a displayable image via data_pipeline's CT windowing
+        # image = gray2rgb(rescale_intensity(image, out_range=np.uint8)).astype(np.uint8)
+        mask = np.load(mask_filename)
+
+        sample = {'image': image, 'mask': mask}
+        if self.transform:
+            sample = self.transform(sample)
+
+        if self.vis:
+            return sample, data_id
+        else:
+            return sample
+
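+
+# Hedged sketch (not part of the original script): each SegmentationDataset item
+# is a {'image', 'mask'} dict built from paired .npy slices, so a quick smoke
+# test could look like:
+#   ds = SegmentationDataset(image_root, mask_root, train_list, transform=None)
+#   sample = ds[0]
+#   print(sample['image'].shape, sample['mask'].shape)
+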
+def remap_by_window(float_data, window_width, window_level):  # take the CT HU window
+    """
+    CT window transform
+    """
+    low = int(window_level - window_width // 2)
+    high = int(window_level + window_width // 2)
+    output = rescale_intensity(float_data, in_range=(low, high), out_range=np.uint8).astype(np.uint8)
+    # rescale_intensity(image, in_range='image', out_range='dtype') is a
+    # skimage.exposure function that returns the image after stretching or
+    # shrinking its intensity levels; in_range and out_range specify the
+    # intensity ranges of the input and output images, used here to map the
+    # HU window (low, high) onto the uint8 range
+    return output
+
+
+# image_root = '/data/zhanglab_headneck/beiyisanyuan/Lens_R/images_2d'
+image_root = '/home/zhanglab3090/headneck/beiyisanyuan/Lens_R/images_2d'
+# mask_root = '/data/zhanglab_headneck/beiyisanyuan/Lens_R/masks_2d'
+mask_root = '/home/zhanglab3090/headneck/beiyisanyuan/Lens_R/masks_2d'
+model_savedir = '/home/zhanglab_headneck/HYF/HNC_ZXY/checkpoints/Lens_R/medt/lognlloss_256_4_1'
+# subject_list = [re.findall('(\d+)', file)[0] for file in os.listdir('/data/zhanglab_headneck/beiyisanyuan/Lens_R/images')]
+subject_list = [re.findall('(\d+)', file)[0] for file in os.listdir('/home/zhanglab3090/headneck/beiyisanyuan/Lens_R/images')]
+train_list, val_list = train_test_split(subject_list, test_size=0.2, random_state=512)
+train_dataset = SegmentationDataset(image_root, mask_root, train_list, transform=tf_train)
+val_dataset = SegmentationDataset(image_root, mask_root, val_list, transform=tf_val)
+
+
+# train_dataset = ImageToImage2D(args.train_dataset, tf_train)
+# val_dataset = ImageToImage2D(args.val_dataset, tf_val)
+# predict_dataset = Image2D(args.val_dataset)
+dataloader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
+valloader = DataLoader(val_dataset, 1, shuffle=True)
+
+device = torch.device("cuda")
+
+if modelname == "axialunet":
+    model = lib.models.axialunet(img_size = imgsize, imgchan = imgchant)
+elif modelname == "MedT":
+    model = lib.models.axialnet.MedT(img_size = imgsize, imgchan = imgchant)
+elif modelname == "gatedaxialunet":
+    model = lib.models.axialnet.gated(img_size = imgsize, imgchan = imgchant)
+elif modelname == "logo":
+    model = lib.models.axialnet.logo(img_size = imgsize, imgchan = imgchant)
+elif modelname == "myaxial":
+    model = lib.models.myaxialnet.mylogo(img_size = imgsize, imgchan = imgchant)
+
+if torch.cuda.device_count() > 1:
+    print("Let's use", torch.cuda.device_count(), "GPUs!")
+    # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
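+    # note: device_ids=[0,1] pins the replicas to the first two GPUs; on a
+    # machine with a different GPU count this list has to match the visible devices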
+    model = nn.DataParallel(model, device_ids=[0,1]).cuda()
+model.to(device)
+summary(model, input_size=(3, 512, 512), batch_size=-1)
+
+# summary(model, input_size=(3, 256, 256), batch_size=-1)
+criterion = LogNLLLoss()
+
+optimizer = torch.optim.Adam(list(model.parameters()), lr=args.learning_rate,
+                             weight_decay=1e-5)
+
+
+pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
+print("Total_params: {}M".format(pytorch_total_params/1e6))
+
+seed = 3000
+np.random.seed(seed)
+torch.manual_seed(seed)
+torch.cuda.manual_seed(seed)
+# torch.set_deterministic(True)
+# random.seed(seed)
+
+
+for epoch in range(args.epochs):
+    print('epoch: ', epoch)
+    epoch_running_loss = 0
+
+    for batch_idx, (X_batch, y_batch, *rest) in enumerate(dataloader):
+
+        X_batch = Variable(X_batch.to(device='cuda'))
+        y_batch = Variable(y_batch.to(device='cuda'))
+
+        # ===================forward=====================
+        output = model(X_batch)
+
+        tmp2 = y_batch.detach().cpu().numpy()
+        tmp = output.detach().cpu().numpy()
+        tmp[tmp>=0.5] = 1
+        tmp[tmp<0.5] = 0
+        tmp2[tmp2>0] = 1
+        tmp2[tmp2<=0] = 0
+        tmp2 = tmp2.astype(int)
+        tmp = tmp.astype(int)
+
+        yHaT = tmp
+        yval = tmp2
+
+        loss = criterion(output, y_batch)
+
+        # ===================backward====================
+        optimizer.zero_grad()
+        loss.backward()
+        optimizer.step()
+        epoch_running_loss += loss.item()
+
+    # ===================log========================
+    print('epoch [{}/{}], loss:{:.4f}'
+          .format(epoch, args.epochs, epoch_running_loss/(batch_idx+1)))
+
+    if epoch == 10:
+        for param in model.parameters():
+            param.requires_grad = True
+    if (epoch % args.save_freq) == 0:
+        metric_list = ['pixel_acc', 'dice', 'precision', 'recall', 'specificity', 'mean_surface_distance']
+        metric_meter = MetricMeter(metrics=metric_list)
+
+        for batch_idx, (X_batch, y_batch, *rest) in enumerate(valloader):
+            # print(batch_idx)
+            if isinstance(rest[0][0], str):
+                image_filename = rest[0][0]
+            else:
+                image_filename = '%s.png' % str(batch_idx + 1).zfill(3)
+
+            X_batch = Variable(X_batch.to(device='cuda'))
+            y_batch = Variable(y_batch.to(device='cuda'))
+            # start = timeit.default_timer()
+            y_out = model(X_batch)
+            # stop = timeit.default_timer()
+            # print('Time: ', stop - start)
+            tmp2 = y_batch.detach().cpu().numpy()
+            tmp = y_out.detach().cpu().numpy()
+            tmp[tmp>=0.5] = 1
+            tmp[tmp<0.5] = 0
+            tmp2[tmp2>0] = 1
+            tmp2[tmp2<=0] = 0
+            tmp2 = tmp2.astype(int)
+            tmp = tmp.astype(int)
+
+            # print(np.unique(tmp2))
+            yHaT = tmp
+            yval = tmp2
+
+            epsilon = 1e-20
+
+            # BinaryMetrics applies a sigmoid internally, so it takes the raw
+            # tensors rather than the thresholded numpy arrays
+            metrics = BinaryMetrics()(y_batch, y_out)  # returns a dict with the various metrics
+            metric_meter.update(metrics)  # update the recorder
+
+        # print('[ Validation ] Loss: {:.4f}'.format(np.mean(loss_list)), end=' ')
+        metric_meter.report(print_stats=True)
+
+        # del X_batch, y_batch,tmp,tmp2, y_out
+        #
+        #
+        # yHaT[yHaT==1] =255
+        # yval[yval==1] =255
+        # fulldir = direc+"/{}/".format(epoch)
+        # print(fulldir+image_filename)
+        # if not os.path.isdir(fulldir):
+        #
+        #     os.makedirs(fulldir)
+        #
+        # cv2.imwrite(fulldir+image_filename, yHaT[0,1,:,:])
+        # # cv2.imwrite(fulldir+'/gt_{}.png'.format(count), yval[0,:,:])
+        # fulldir = direc+"/{}/".format(epoch)
+        # torch.save(model.state_dict(), fulldir+args.modelname+".pth")
+        # torch.save(model.state_dict(), direc+"final_model.pth")
+
+
+
diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/utils.py b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/utils.py
new file mode 100644
index
0000000000000000000000000000000000000000..7fe23fcc8dba7372315e56abcf0f97292fe974fb --- /dev/null +++ b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/utils.py @@ -0,0 +1,285 @@ +import os +import numpy as np +import torch + +from skimage import io,color +from PIL import Image +from torch.utils.data import Dataset +from torchvision import transforms as T +from torchvision.transforms import functional as F + +from typing import Callable +import os +import cv2 +import pandas as pd + +from numbers import Number +from typing import Container +from collections import defaultdict + + +def to_long_tensor(pic): + # handle numpy array + img = torch.from_numpy(np.array(pic, np.uint8)) + # backward compatibility + return img.long() + + +def correct_dims(*images): + corr_images = [] + # print(images) + for img in images: + if len(img.shape) == 2: + corr_images.append(np.expand_dims(img, axis=2)) + else: + corr_images.append(img) + + if len(corr_images) == 1: + return corr_images[0] + else: + return corr_images + + +class JointTransform2D: + """ + Performs augmentation on image and mask when called. Due to the randomness of augmentation transforms, + it is not enough to simply apply the same Transform from torchvision on the image and mask separetely. + Doing this will result in messing up the ground truth mask. To circumvent this problem, this class can + be used, which will take care of the problems above. + + Args: + crop: tuple describing the size of the random crop. If bool(crop) evaluates to False, no crop will + be taken. + p_flip: float, the probability of performing a random horizontal flip. + color_jitter_params: tuple describing the parameters of torchvision.transforms.ColorJitter. + If bool(color_jitter_params) evaluates to false, no color jitter transformation will be used. + p_random_affine: float, the probability of performing a random affine transform using + torchvision.transforms.RandomAffine. + long_mask: bool, if True, returns the mask as LongTensor in label-encoded format. + """ + def __init__(self, crop=(32, 32), p_flip=0.5, color_jitter_params=(0.1, 0.1, 0.1, 0.1), + p_random_affine=0, long_mask=False): + self.crop = crop + self.p_flip = p_flip + self.color_jitter_params = color_jitter_params + if color_jitter_params: + self.color_tf = T.ColorJitter(*color_jitter_params) + self.p_random_affine = p_random_affine + self.long_mask = long_mask + + def __call__(self, image, mask): + # transforming to PIL image + image = image.astype(np.uint8) + mask = mask.astype(np.uint8) + + image, mask = F.to_pil_image(image), F.to_pil_image(mask) + + # random crop + if self.crop: + i, j, h, w = T.RandomCrop.get_params(image, self.crop) + image, mask = F.crop(image, i, j, h, w), F.crop(mask, i, j, h, w) + + if np.random.rand() < self.p_flip: + image, mask = F.hflip(image), F.hflip(mask) + + # color transforms || ONLY ON IMAGE + if self.color_jitter_params: + image = self.color_tf(image) + + # random affine transform + if np.random.rand() < self.p_random_affine: + affine_params = T.RandomAffine(180).get_params((-90, 90), (1, 1), (2, 2), (-45, 45), self.crop) + image, mask = F.affine(image, *affine_params), F.affine(mask, *affine_params) + + # transforming to tensor + image = F.to_tensor(image) + if not self.long_mask: + mask = F.to_tensor(mask) + else: + mask = to_long_tensor(mask) + + return image, mask + + +class ImageToImage2D(Dataset): + """ + Reads the images and applies the augmentation transform on them. + Usage: + 1. 
If used without the unet.model.Model wrapper, an instance of this object should be passed to + torch.utils.data.DataLoader. Iterating through this returns the tuple of image, mask and image + filename. + 2. With unet.model.Model wrapper, an instance of this object should be passed as train or validation + datasets. + + Args: + dataset_path: path to the dataset. Structure of the dataset should be: + dataset_path + |-- images + |-- img001.png + |-- img002.png + |-- ... + |-- masks + |-- img001.png + |-- img002.png + |-- ... + + joint_transform: augmentation transform, an instance of JointTransform2D. If bool(joint_transform) + evaluates to False, torchvision.transforms.ToTensor will be used on both image and mask. + one_hot_mask: bool, if True, returns the mask in one-hot encoded form. + """ + + def __init__(self, dataset_path: str, joint_transform: Callable = None, one_hot_mask: int = False) -> None: + self.dataset_path = dataset_path + self.input_path = os.path.join(dataset_path, 'img') + self.output_path = os.path.join(dataset_path, 'labelcol') + self.images_list = os.listdir(self.input_path) + self.one_hot_mask = one_hot_mask + + if joint_transform: + self.joint_transform = joint_transform + else: + to_tensor = T.ToTensor() + self.joint_transform = lambda x, y: (to_tensor(x), to_tensor(y)) + + def __len__(self): + return len(os.listdir(self.input_path)) + + def __getitem__(self, idx): + image_filename = self.images_list[idx] + #print(image_filename[: -3]) + # read image + # print(os.path.join(self.input_path, image_filename)) + # print(os.path.join(self.output_path, image_filename[: -3] + "png")) + # print(os.path.join(self.input_path, image_filename)) + image = cv2.imread(os.path.join(self.input_path, image_filename)) + # print(image.shape) + # read mask image + mask = cv2.imread(os.path.join(self.output_path, image_filename[: -3] + "png"),0) + + mask[mask<=127] = 0 + mask[mask>127] = 1 + # correct dimensions if needed + image, mask = correct_dims(image, mask) + # print(image.shape) + + if self.joint_transform: + image, mask = self.joint_transform(image, mask) + + if self.one_hot_mask: + assert self.one_hot_mask > 0, 'one_hot_mask must be nonnegative' + mask = torch.zeros((self.one_hot_mask, mask.shape[1], mask.shape[2])).scatter_(0, mask.long(), 1) + # mask = np.swapaxes(mask,2,0) + # print(image.shape) + # print(mask.shape) + # mask = np.transpose(mask,(2,0,1)) + # image = np.transpose(image,(2,0,1)) + # print(image.shape) + # print(mask.shape) + + return image, mask, image_filename + + +class Image2D(Dataset): + """ + Reads the images and applies the augmentation transform on them. As opposed to ImageToImage2D, this + reads a single image and requires a simple augmentation transform. + Usage: + 1. If used without the unet.model.Model wrapper, an instance of this object should be passed to + torch.utils.data.DataLoader. Iterating through this returns the tuple of image and image + filename. + 2. With unet.model.Model wrapper, an instance of this object should be passed as a prediction + dataset. + + Args: + + dataset_path: path to the dataset. Structure of the dataset should be: + dataset_path + |-- images + |-- img001.png + |-- img002.png + |-- ... + + transform: augmentation transform. If bool(joint_transform) evaluates to False, + torchvision.transforms.ToTensor will be used. 
+ """ + + def __init__(self, dataset_path: str, transform: Callable = None): + + self.dataset_path = dataset_path + self.input_path = os.path.join(dataset_path, 'img') + self.images_list = os.listdir(self.input_path) + + if transform: + self.transform = transform + else: + self.transform = T.ToTensor() + + def __len__(self): + return len(os.listdir(self.input_path)) + + def __getitem__(self, idx): + + image_filename = self.images_list[idx] + + image = cv2.imread(os.path.join(self.input_path, image_filename)) + + # image = np.transpose(image,(2,0,1)) + + image = correct_dims(image) + + image = self.transform(image) + + # image = np.swapaxes(image,2,0) + + return image, image_filename + +def chk_mkdir(*paths: Container) -> None: + """ + Creates folders if they do not exist. + + Args: + paths: Container of paths to be created. + """ + for path in paths: + if not os.path.exists(path): + os.makedirs(path) + + +class Logger: + def __init__(self, verbose=False): + self.logs = defaultdict(list) + self.verbose = verbose + + def log(self, logs): + for key, value in logs.items(): + self.logs[key].append(value) + + if self.verbose: + print(logs) + + def get_logs(self): + return self.logs + + def to_csv(self, path): + pd.DataFrame(self.logs).to_csv(path, index=None) + + +class MetricList: + def __init__(self, metrics): + assert isinstance(metrics, dict), '\'metrics\' must be a dictionary of callables' + self.metrics = metrics + self.results = {key: 0.0 for key in self.metrics.keys()} + + def __call__(self, y_out, y_batch): + for key, value in self.metrics.items(): + self.results[key] += value(y_out, y_batch) + + def reset(self): + self.results = {key: 0.0 for key in self.metrics.keys()} + + def get_results(self, normalize=False): + assert isinstance(normalize, bool) or isinstance(normalize, Number), '\'normalize\' must be boolean or a number' + if not normalize: + return self.results + else: + return {key: value/normalize for key, value in self.results.items()} diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/utils_gray.py b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/utils_gray.py new file mode 100644 index 0000000000000000000000000000000000000000..6c9b4253932036bbb6579a559c14ff82179988ee --- /dev/null +++ b/PuzzleTuning/SSL_structures/Medical_Transformer_main/Medical_Transformer_main/utils_gray.py @@ -0,0 +1,283 @@ +import os +import numpy as np +import torch + +from skimage import io,color +from PIL import Image +from torch.utils.data import Dataset +from torchvision import transforms as T +from torchvision.transforms import functional as F + +from typing import Callable +import os +import cv2 +import pandas as pd + +from numbers import Number +from typing import Container +from collections import defaultdict + + +def to_long_tensor(pic): + # handle numpy array + img = torch.from_numpy(np.array(pic, np.uint8)) + # backward compatibility + return img.long() + + +def correct_dims(*images): + corr_images = [] + # print(images) + for img in images: + if len(img.shape) == 2: + corr_images.append(np.expand_dims(img, axis=2)) + else: + corr_images.append(img) + + if len(corr_images) == 1: + return corr_images[0] + else: + return corr_images + + +class JointTransform2D: + """ + Performs augmentation on image and mask when called. Due to the randomness of augmentation transforms, + it is not enough to simply apply the same Transform from torchvision on the image and mask separetely. 
+ Doing this will result in messing up the ground truth mask. To circumvent this problem, this class can + be used, which will take care of the problems above. + + Args: + crop: tuple describing the size of the random crop. If bool(crop) evaluates to False, no crop will + be taken. + p_flip: float, the probability of performing a random horizontal flip. + color_jitter_params: tuple describing the parameters of torchvision.transforms.ColorJitter. + If bool(color_jitter_params) evaluates to false, no color jitter transformation will be used. + p_random_affine: float, the probability of performing a random affine transform using + torchvision.transforms.RandomAffine. + long_mask: bool, if True, returns the mask as LongTensor in label-encoded format. + """ + def __init__(self, crop=(32, 32), p_flip=0.5, color_jitter_params=(0.1, 0.1, 0.1, 0.1), + p_random_affine=0, long_mask=False): + self.crop = crop + self.p_flip = p_flip + self.color_jitter_params = color_jitter_params + if color_jitter_params: + self.color_tf = T.ColorJitter(*color_jitter_params) + self.p_random_affine = p_random_affine + self.long_mask = long_mask + + def __call__(self, image, mask): + # transforming to PIL image + image, mask = F.to_pil_image(image), F.to_pil_image(mask) + + # random crop + if self.crop: + i, j, h, w = T.RandomCrop.get_params(image, self.crop) + image, mask = F.crop(image, i, j, h, w), F.crop(mask, i, j, h, w) + + if np.random.rand() < self.p_flip: + image, mask = F.hflip(image), F.hflip(mask) + + # color transforms || ONLY ON IMAGE + if self.color_jitter_params: + image = self.color_tf(image) + + # random affine transform + if np.random.rand() < self.p_random_affine: + affine_params = T.RandomAffine(180).get_params((-90, 90), (1, 1), (2, 2), (-45, 45), self.crop) + image, mask = F.affine(image, *affine_params), F.affine(mask, *affine_params) + + # transforming to tensor + image = F.to_tensor(image) + if not self.long_mask: + mask = F.to_tensor(mask) + else: + mask = to_long_tensor(mask) + + return image, mask + + +class ImageToImage2D(Dataset): + """ + Reads the images and applies the augmentation transform on them. + Usage: + 1. If used without the unet.model.Model wrapper, an instance of this object should be passed to + torch.utils.data.DataLoader. Iterating through this returns the tuple of image, mask and image + filename. + 2. With unet.model.Model wrapper, an instance of this object should be passed as train or validation + datasets. + + Args: + dataset_path: path to the dataset. Structure of the dataset should be: + dataset_path + |-- images + |-- img001.png + |-- img002.png + |-- ... + |-- masks + |-- img001.png + |-- img002.png + |-- ... + + joint_transform: augmentation transform, an instance of JointTransform2D. If bool(joint_transform) + evaluates to False, torchvision.transforms.ToTensor will be used on both image and mask. + one_hot_mask: bool, if True, returns the mask in one-hot encoded form. 
+ """ + + def __init__(self, dataset_path: str, joint_transform: Callable = None, one_hot_mask: int = False) -> None: + self.dataset_path = dataset_path + self.input_path = os.path.join(dataset_path, 'img') + self.output_path = os.path.join(dataset_path, 'labelcol') + self.images_list = os.listdir(self.input_path) + self.one_hot_mask = one_hot_mask + + if joint_transform: + self.joint_transform = joint_transform + else: + to_tensor = T.ToTensor() + self.joint_transform = lambda x, y: (to_tensor(x), to_tensor(y)) + + def __len__(self): + return len(os.listdir(self.input_path)) + + def __getitem__(self, idx): + image_filename = self.images_list[idx] + #print(image_filename[: -3]) + # read image + # print(os.path.join(self.input_path, image_filename)) + # print(os.path.join(self.output_path, image_filename[: -3] + "png")) + # print(os.path.join(self.input_path, image_filename)) + image = cv2.imread(os.path.join(self.input_path, image_filename),0) + # print(image.shape) + # read mask image + mask = cv2.imread(os.path.join(self.output_path, image_filename[: -3] + "png"),0) + + # correct dimensions if needed + image, mask = correct_dims(image, mask) + # print(image.shape) + mask[mask<127] = 0 + mask[mask>=127] = 1 + + + if self.joint_transform: + image, mask = self.joint_transform(image, mask) + + if self.one_hot_mask: + assert self.one_hot_mask > 0, 'one_hot_mask must be nonnegative' + mask = torch.zeros((self.one_hot_mask, mask.shape[1], mask.shape[2])).scatter_(0, mask.long(), 1) + # mask = np.swapaxes(mask,2,0) + # print(image.shape) + # print(mask.shape) + # mask = np.transpose(mask,(2,0,1)) + # image = np.transpose(image,(2,0,1)) + # print(image.shape) + # print(mask.shape) + + return image, mask, image_filename + + +class Image2D(Dataset): + """ + Reads the images and applies the augmentation transform on them. As opposed to ImageToImage2D, this + reads a single image and requires a simple augmentation transform. + Usage: + 1. If used without the unet.model.Model wrapper, an instance of this object should be passed to + torch.utils.data.DataLoader. Iterating through this returns the tuple of image and image + filename. + 2. With unet.model.Model wrapper, an instance of this object should be passed as a prediction + dataset. + + Args: + + dataset_path: path to the dataset. Structure of the dataset should be: + dataset_path + |-- images + |-- img001.png + |-- img002.png + |-- ... + + transform: augmentation transform. If bool(joint_transform) evaluates to False, + torchvision.transforms.ToTensor will be used. + """ + + def __init__(self, dataset_path: str, transform: Callable = None): + + self.dataset_path = dataset_path + self.input_path = os.path.join(dataset_path, 'img') + self.images_list = os.listdir(self.input_path) + + if transform: + self.transform = transform + else: + self.transform = T.ToTensor() + + def __len__(self): + return len(os.listdir(self.input_path)) + + def __getitem__(self, idx): + + image_filename = self.images_list[idx] + + image = cv2.imread(os.path.join(self.input_path, image_filename),0) + + # image = np.transpose(image,(2,0,1)) + + image = correct_dims(image) + + image = self.transform(image) + + # image = np.swapaxes(image,2,0) + + return image, image_filename + +def chk_mkdir(*paths: Container) -> None: + """ + Creates folders if they do not exist. + + Args: + paths: Container of paths to be created. 
+ """ + for path in paths: + if not os.path.exists(path): + os.makedirs(path) + + +class Logger: + def __init__(self, verbose=False): + self.logs = defaultdict(list) + self.verbose = verbose + + def log(self, logs): + for key, value in logs.items(): + self.logs[key].append(value) + + if self.verbose: + print(logs) + + def get_logs(self): + return self.logs + + def to_csv(self, path): + pd.DataFrame(self.logs).to_csv(path, index=None) + + +class MetricList: + def __init__(self, metrics): + assert isinstance(metrics, dict), '\'metrics\' must be a dictionary of callables' + self.metrics = metrics + self.results = {key: 0.0 for key in self.metrics.keys()} + + def __call__(self, y_out, y_batch): + for key, value in self.metrics.items(): + self.results[key] += value(y_out, y_batch) + + def reset(self): + self.results = {key: 0.0 for key in self.metrics.keys()} + + def get_results(self, normalize=False): + assert isinstance(normalize, bool) or isinstance(normalize, Number), '\'normalize\' must be boolean or a number' + if not normalize: + return self.results + else: + return {key: value/normalize for key, value in self.results.items()} diff --git a/PuzzleTuning/SSL_structures/Medical_Transformer_main/__init__.py b/PuzzleTuning/SSL_structures/Medical_Transformer_main/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/PuzzleTuning/SSL_structures/SAE.py b/PuzzleTuning/SSL_structures/SAE.py new file mode 100644 index 0000000000000000000000000000000000000000..274427cc8417070fd1c8468eae526b810ac08c15 --- /dev/null +++ b/PuzzleTuning/SSL_structures/SAE.py @@ -0,0 +1,798 @@ +""" +SAE Model Script ver: Oct 28th 2023 15:30 +SAE stands for shuffled autoencoder, designed for PuzzleTuning + +# References: +Based on MAE code. 
+https://github.com/facebookresearch/mae + +""" + +from functools import partial + +import torch +import torch.nn as nn + +from timm.models.vision_transformer import PatchEmbed, Block + +from SSL_structures.pos_embed import get_2d_sincos_pos_embed + +from Backbone.VPT_structure import VPT_ViT + + +class ShuffledAutoEncoderViT(VPT_ViT): + """ + Shuffled Autoencoder with VisionTransformer backbone + + prompt_mode: "Deep" / "Shallow" by default None + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, + embed_dim=1024, depth=24, num_heads=16, + decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16, + mlp_ratio=4., norm_layer=nn.LayerNorm, norm_pix_loss=False, group_shuffle_size=-1, + prompt_mode=None, Prompt_Token_num=20, basic_state_dict=None, decoder=None, decoder_rep_dim=None): + + if prompt_mode is None: + super().__init__() + # SAE encoder specifics (this part just the same as ViT) + # -------------------------------------------------------------------------- + self.patch_embed = PatchEmbed(img_size, patch_size, in_chans, embed_dim) # BCHW -> BNC + num_patches = self.patch_embed.num_patches + + # learnable cls token is still used but on cls head need + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + # set and freeze encoder_pos_embed, use the fixed sin-cos embedding for tokens + mask_token + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim), requires_grad=False) + # Encoder blocks + self.blocks = nn.ModuleList([ # qk_scale=None fixme related to timm version + Block(embed_dim, num_heads, mlp_ratio, qkv_bias=True, norm_layer=norm_layer) + for i in range(depth)]) + self.norm = norm_layer(embed_dim) + + self.prompt_mode = prompt_mode + # -------------------------------------------------------------------------- + + else: + super().__init__(img_size=img_size, patch_size=patch_size, in_chans=in_chans, + embed_dim=embed_dim, depth=depth, num_heads=num_heads, mlp_ratio=mlp_ratio, + norm_layer=norm_layer, Prompt_Token_num=Prompt_Token_num, VPT_type=prompt_mode, + basic_state_dict=None) # Firstly, set then Encoder state_dict to none here. 
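+            # in prompt mode the VPT parent class builds the full ViT encoder; the
+            # backbone weights (if any) are loaded later via basic_state_dict, and
+            # Freeze() below leaves only the prompt tokens trainable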
+    """
+
+    def __init__(self, img_size=224, patch_size=16, in_chans=3,
+                 embed_dim=1024, depth=24, num_heads=16,
+                 decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16,
+                 mlp_ratio=4., norm_layer=nn.LayerNorm, norm_pix_loss=False, group_shuffle_size=-1,
+                 prompt_mode=None, Prompt_Token_num=20, basic_state_dict=None, decoder=None, decoder_rep_dim=None):
+
+        if prompt_mode is None:
+            super().__init__()
+            # SAE encoder specifics (this part is the same as a plain ViT)
+            # --------------------------------------------------------------------------
+            self.patch_embed = PatchEmbed(img_size, patch_size, in_chans, embed_dim)  # BCHW -> BNC
+            num_patches = self.patch_embed.num_patches
+
+            # the learnable cls token is still used, but no cls head is needed
+            self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
+            # set and freeze encoder_pos_embed, use the fixed sin-cos embedding for tokens + mask_token
+            self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim), requires_grad=False)
+            # Encoder blocks
+            self.blocks = nn.ModuleList([  # qk_scale=None fixme related to timm version
+                Block(embed_dim, num_heads, mlp_ratio, qkv_bias=True, norm_layer=norm_layer)
+                for i in range(depth)])
+            self.norm = norm_layer(embed_dim)
+
+            self.prompt_mode = prompt_mode
+            # --------------------------------------------------------------------------
+
+        else:
+            super().__init__(img_size=img_size, patch_size=patch_size, in_chans=in_chans,
+                             embed_dim=embed_dim, depth=depth, num_heads=num_heads, mlp_ratio=mlp_ratio,
+                             norm_layer=norm_layer, Prompt_Token_num=Prompt_Token_num, VPT_type=prompt_mode,
+                             basic_state_dict=None)  # First, set the Encoder state_dict to None here.
+            num_patches = self.patch_embed.num_patches  # set patch_embed of VPT
+            # set and freeze encoder_pos_embed, use the fixed sin-cos embedding for tokens + mask_token
+            self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim), requires_grad=False)
+
+            self.prompt_mode = prompt_mode
+            # Freeze Encoder parameters except for the Prompt Tokens
+            self.Freeze()
+
+        # SAE decoder specifics (TODO: as a low-level backbone, exploration for future segmentation is needed)
+        # --------------------------------------------------------------------------
+        # if the feature dimensions of encoder and decoder are different, use decoder_embed to align them
+        if embed_dim != decoder_embed_dim:
+            self.decoder_embed = nn.Linear(embed_dim, decoder_embed_dim, bias=True)
+        else:
+            self.decoder_embed = nn.Identity()
+
+        # set decoder
+        if decoder is not None:
+            self.decoder = decoder
+            # the Decoder uses an FC layer to reconstruct the image, unlike the Encoder which uses a CNN to split patches
+            self.decoder_pred = nn.Linear(decoder_rep_dim, patch_size ** 2 * in_chans, bias=True)  # decoder to patch
+
+        else:
+            self.decoder = None
+            # set and freeze decoder_pos_embed, use the fixed sin-cos embedding for tokens + mask_token
+            self.decoder_pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, decoder_embed_dim),
+                                                  requires_grad=False)
+            self.decoder_blocks = nn.ModuleList([  # qk_scale=None fixme related to timm version
+                Block(decoder_embed_dim, decoder_num_heads, mlp_ratio, qkv_bias=True, norm_layer=norm_layer)
+                for i in range(decoder_depth)])
+            self.decoder_norm = norm_layer(decoder_embed_dim)
+
+            # the Decoder uses an FC layer to reconstruct the image, unlike the Encoder which uses a CNN to split patches
+            self.decoder_pred = nn.Linear(decoder_embed_dim, patch_size ** 2 * in_chans, bias=True)  # decoder to patch
+
+        # --------------------------------------------------------------------------
+        # this controls the puzzle group
+        self.group_shuffle_size = group_shuffle_size
+
+        # whether or not to use norm_pix_loss
+        self.norm_pix_loss = norm_pix_loss
+        # parameter initialization
+        self.initialize_weights()
+
+        # load basic state_dict of backbone for Transfer-learning-based tuning
+        if basic_state_dict is not None:
+            self.load_state_dict(basic_state_dict, False)
+
+    def initialize_weights(self):
+        # initialization
+        # initialize a 2d positional encoding of (embed_dim, grid) by sin-cos embedding
+        pos_embed = get_2d_sincos_pos_embed(self.pos_embed.shape[-1],
+                                            int(self.patch_embed.num_patches ** .5),
+                                            cls_token=True)
+        self.pos_embed.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0))
+
+        if self.decoder is None:
+            # initialize a 2d positional encoding of (embed_dim, grid) by sin-cos embedding
+            decoder_pos_embed = get_2d_sincos_pos_embed(self.decoder_pos_embed.shape[-1],
+                                                        int(self.patch_embed.num_patches ** .5),
+                                                        cls_token=True)
+            self.decoder_pos_embed.data.copy_(torch.from_numpy(decoder_pos_embed).float().unsqueeze(0))
+
+        # initialize patch_embed like nn.Linear (instead of nn.Conv2d)
+        w = self.patch_embed.proj.weight.data
+        torch.nn.init.xavier_uniform_(w.view([w.shape[0], -1]))  # xavier_uniform keeps the input/output variance equal, for both the forward and backward pass
+
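+        # note: both pos_embed tensors were created with requires_grad=False and are
+        # filled with fixed sin-cos values here, so they are never trained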
+        # timm's trunc_normal_(std=.02) is effectively normal_(std=0.02) as cutoff is too big (2.)
+        torch.nn.init.normal_(self.cls_token, std=.02)
+        # torch.nn.init.normal_(self.prompt_token, std=.02)
+
+        # initialize nn.Linear and nn.LayerNorm
+        self.apply(self._init_weights)
+
+    def _init_weights(self, m):
+        # initialize nn.Linear and nn.LayerNorm
+        if isinstance(m, nn.Linear):
+            # we use xavier_uniform following official JAX ViT:
+            torch.nn.init.xavier_uniform_(m.weight)
+            if isinstance(m, nn.Linear) and m.bias is not None:
+                nn.init.constant_(m.bias, 0)
+        elif isinstance(m, nn.LayerNorm):
+            nn.init.constant_(m.bias, 0)
+            nn.init.constant_(m.weight, 1.0)
+
+    def patchify(self, imgs, patch_size=None):
+        """
+        Break an image into patch tokens
+
+        input:
+        imgs: (B, 3, H, W)
+
+        output:
+        x: (B, num_patches, patch_size**2 *3) AKA [B, num_patches, flatten_dim]
+        """
+        # patch_size
+        patch_size = self.patch_embed.patch_size[0] if patch_size is None else patch_size
+
+        # assert H == W and the image shape is divisible by the patch size
+        assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % patch_size == 0
+        # patch num in row or column
+        h = w = imgs.shape[2] // patch_size
+
+        # use reshape to split patch [B, C, H, W] -> [B, C, h_p, patch_size, w_p, patch_size]
+        x = imgs.reshape(shape=(imgs.shape[0], 3, h, patch_size, w, patch_size))
+
+        # ReArrange dimensions [B, C, h_p, patch_size, w_p, patch_size] -> [B, h_p, w_p, patch_size, patch_size, C]
+        x = torch.einsum('nchpwq->nhwpqc', x)
+        # ReArrange dimensions [B, h_p, w_p, patch_size, patch_size, C] -> [B, num_patches, flatten_dim]
+        x = x.reshape(shape=(imgs.shape[0], h * w, patch_size ** 2 * 3))
+        return x
+
+    def patchify_decoder(self, imgs, patch_size=None):
+        """
+        Break an image into patch tokens
+
+        FIXME: note that patch_size here should more reasonably take the decoder's network setting as its default
+
+        input:
+        imgs: (B, CLS, H, W)
+
+        output:
+        x: (B, num_patches, -1) AKA [B, num_patches, -1]
+        """
+        # patch_size
+        patch_size = self.patch_embed.patch_size[0] if patch_size is None else patch_size
+
+        # assert H == W and the image shape is divisible by the patch size
+        assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % patch_size == 0
+        # patch num in row or column
+        h = w = imgs.shape[2] // patch_size
+
+        # use reshape to split patch [B, CLS, H, W] -> [B, CLS, h_p, patch_size, w_p, patch_size]
+        x = imgs.reshape(shape=(imgs.shape[0], -1, h, patch_size, w, patch_size))
+
+        # ReArrange dimensions [B, CLS, h_p, patch_size, w_p, patch_size] -> [B, h_p, w_p, patch_size, patch_size, CLS]
+        x = torch.einsum('nchpwq->nhwpqc', x)
+        # ReArrange dimensions [B, h_p, w_p, patch_size, patch_size, C] -> [B, num_patches, flatten_dim]
+        x = x.reshape(shape=(imgs.shape[0], h * w, -1))
+        return x
+
+    def unpatchify(self, x, patch_size=None):
+        """
+        Reassemble patch tokens back into an image
+
+        input:
+        x: (B, num_patches, patch_size**2 *3) AKA [B, num_patches, flatten_dim]
+
+        output:
+        imgs: (B, 3, H, W)
+        """
+        # patch_size
+        p = self.patch_embed.patch_size[0] if patch_size is None else patch_size
+
+        # square root of num_patches (the CLS token must be excluded)
+        h = w = int(x.shape[1] ** .5)
+        # assert num_patches is without the CLS token
+        assert h * w == x.shape[1]
+
+        # ReArrange dimensions [B, num_patches, flatten_dim] -> [B, h_p, w_p, patch_size, patch_size, C]
+        x = x.reshape(shape=(x.shape[0], h, w, p, p, 3))
+        # ReArrange dimensions [B, h_p, w_p, patch_size, patch_size, C] -> [B, C, h_p, patch_size, w_p, patch_size]
+        x = torch.einsum('nhwpqc->nchpwq', x)
+        # use reshape to compose patch [B, C, h_p, patch_size, w_p, patch_size] -> [B, C, H, W]
+        imgs = x.reshape(shape=(x.shape[0], 3, h * p, h * p))
+        return imgs
+
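+    # note: patchify/unpatchify are exact inverses built purely from reshapes and
+    # einsum transposes (no learned weights), so unpatchify(patchify(imgs))
+    # recovers imgs exactly for square inputs divisible by patch_size
+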
+    def fix_position_shuffling(self, x, fix_position_ratio, puzzle_patch_size):
+        """
+        Fix-position shuffling
+
+        Randomly assign patches by per-sample shuffling.
+        After it, the fixed patches are reserved as Positional Tokens, while
+        the rest of the patches are randomly shuffled among the batch since they serve as Relation Tokens.
+
+        Per-sample shuffling is done by argsorting random noise.
+        The batch-wise shuffle operation is done by shuffling all indexes.
+
+        input:
+        x: [B, 3, H, W], input image tensor
+        fix_position_ratio  float
+        puzzle_patch_size  int
+
+        output: x_puzzled, mask
+        x_puzzled: [B, 3, H, W]
+        mask: [B, 3, H, W], binary mask indicating pix position with 0
+        """
+        # Break img into puzzle patches with the size of puzzle_patch_size [B, num_puzzle_patches, D_puzzle]
+        x = self.patchify(x, puzzle_patch_size)
+        # output: x: (B, num_patches, patch_size**2 *3) AKA [B, num_patches, flatten_dim]
+        B, num_puzzle_patches, D = x.shape
+
+        # num of fix_position puzzle patches
+        len_fix_position = int(num_puzzle_patches * fix_position_ratio)
+        num_shuffled_patches = num_puzzle_patches - len_fix_position
+        # create a noise tensor to prepare shuffle idx of puzzle patches
+        noise = torch.rand(B, num_puzzle_patches, device=x.device)  # [B,num_puzzle_patches] noise in [0, 1]
+
+        # for each sequence in the batch, argsort the noise tensor in ascending order to obtain
+        # the idx matrix of the original positions (different for each sample)
+        ids_shuffle = torch.argsort(noise, dim=1)  # ascend: small is keep, large is remove
+        # argsort the idx matrix again to obtain, for every position of the original noise tensor, its sorting rank
+        ids_restore = torch.argsort(ids_shuffle, dim=1)
+
+        # keep the first subset: the front part is fixed, the rest is puzzled
+        ids_fix = ids_shuffle[:, :len_fix_position]  # [B,num_puzzle_patches] -> [B,fix_patches]
+        # fix_patches=num_puzzle_patches * fix_position_ratio  len_fix_position
+        ids_puzzle = ids_shuffle[:, len_fix_position:]  # [B,num_puzzle_patches] -> [B,puzzle_patches]
+        # puzzle_patches=num_puzzle_patches*(1-fix_position_ratio)  num_shuffled_patches
+
+        # set puzzle patch
+        # ids_?.unsqueeze(-1).repeat(1, 1, D)
+        # [B,?_patches] -> [B,?_patches,1] (at each place with the idx of ori patch) -> [B,?_patches,D]
+
+        # torch.gather to select patch groups x_fixed of [B,fix_patches,D] and x_puzzle of [B,puzzle_patches,D]
+        # the patches to keep; different for each sample in the batch
+        x_fixed = torch.gather(x, dim=1, index=ids_fix.unsqueeze(-1).repeat(1, 1, D))
+        # the patches to shuffle; different for each sample in the batch
+        x_puzzle = torch.gather(x, dim=1, index=ids_puzzle.unsqueeze(-1).repeat(1, 1, D))
+
+        # a batch&patch-wise shuffle is needed, otherwise the restore would restore all puzzles
+        if self.group_shuffle_size == -1 or self.group_shuffle_size == B:
+            puzzle_shuffle_indices = torch.randperm(B * num_shuffled_patches, device=x.device, requires_grad=False)
+        else:
+            assert B > self.group_shuffle_size > 0 and B % self.group_shuffle_size == 0
+            # build [B//group_shuffle_size, num_shuffled_patches * group_shuffle_size] noise in [0, 1]
+            group_noise = torch.rand(B // self.group_shuffle_size, num_shuffled_patches * self.group_shuffle_size, device=x.device)
+            # get the shuffled index within each (num_shuffled_patches * group_shuffle_size) group
+            group_ids_shuffle = torch.argsort(group_noise, dim=1)
+            # break the dim and add the group idx (in a list), stack back to a tensor
+            group_ids_shuffle = torch.stack([group_ids_shuffle[i]
+                                             + num_shuffled_patches * self.group_shuffle_size * i
+                                             for i in range(B // self.group_shuffle_size)])
+            # flatten to be the idx over all (B * num_shuffled_patches)
+            puzzle_shuffle_indices = group_ids_shuffle.view(-1)
+
+        # puzzle_shuffle_indices is a random permutation of 0 ~ B*num_shuffled_patches-1 (inclusive)
+        x_puzzle = x_puzzle.view(B * num_shuffled_patches, D)[puzzle_shuffle_indices].view(B, num_shuffled_patches, D)
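+        # e.g. with B=4 and group_shuffle_size=2, relation patches are only exchanged
+        # within samples {0, 1} and within samples {2, 3}; group_shuffle_size=-1
+        # shuffles across the whole batch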
+        # the randperm permutation shuffles all the to-be-shuffled parts within the batch,
+        # after which they are restored to the original per-sample split
+        # pack up all puzzle patches
+        x = torch.cat([x_fixed, x_puzzle], dim=1)
+
+        # generate the binary mask: 0 is keep, 1 is remove
+        mask = torch.ones([B, num_puzzle_patches, D], device=x.device, requires_grad=False)  # no grad
+        mask[:, :len_fix_position, :] = 0  # set the first len_fix_position tokens to 0, the rest to 1
+
+        # unshuffle to restore the fixed positions
+        x = torch.gather(x, dim=1, index=ids_restore.unsqueeze(-1).repeat(1, 1, D))
+        # torch.gather to generate the restored binary mask
+        mask = torch.gather(mask, dim=1, index=ids_restore.unsqueeze(-1).repeat(1, 1, D))
+
+        # unpatchify to obtain puzzle images and their mask
+        x = self.unpatchify(x, puzzle_patch_size)
+        mask = self.unpatchify(mask, puzzle_patch_size)
+
+        return x, mask  # x_puzzled and mask
+
+    def forward_puzzle(self, imgs, fix_position_ratio=0.25, puzzle_patch_size=32):
+        """
+        Transform the input images to puzzle images
+
+        input:
+        x: [B, 3, H, W], input image tensor
+        fix_position_ratio  float
+        puzzle_patch_size  int
+
+        output: x_puzzled, mask
+        x_puzzled: [B, 3, H, W]
+        mask: [B, 3, H, W], binary mask indicating pix position with 0
+        """
+        x_puzzled, mask = self.fix_position_shuffling(imgs, fix_position_ratio, puzzle_patch_size)
+        return x_puzzled, mask
+
+    def forward_encoder(self, imgs):
+        """
+        :param imgs: [B, C, H, W], sequence of imgs
+
+        :return: Encoder output: encoded tokens, CLS token, embedded puzzle
+        x: [B, num_patches, D], sequence of Tokens (excluding the cls token)
+        CLS_token: [B, 1, D]
+        """
+
+        if self.prompt_mode is None:  # ViT
+            # embed patches
+            x = self.patch_embed(imgs)
+
+            # add pos embed before concatenating the cls token
+            x = x + self.pos_embed[:, 1:, :]
+
+            # detach puzzle for embed_puzzle output
+            embed_puzzle = x.data.detach()
+
+            # append cls token
+            cls_token = self.cls_token + self.pos_embed[:, :1, :]
+            cls_tokens = cls_token.expand(x.shape[0], -1, -1)  # batch fix
+            x = torch.cat((cls_tokens, x), dim=1)
+
+            # apply Transformer blocks
+            for blk in self.blocks:
+                x = blk(x)
+
+        else:  # VPT
+            x = self.patch_embed(imgs)
+            # add pos embed before concatenating the cls token
+            x = x + self.pos_embed[:, 1:, :]
+
+            # detach puzzle for embed_puzzle output
+            embed_puzzle = x.data.detach()  # copy the embedded original puzzle (for illustration)
+
+            # append cls token
+            cls_token = self.cls_token + self.pos_embed[:, :1, :]
+            cls_tokens = cls_token.expand(x.shape[0], -1, -1)  # batch fix
+            x = torch.cat((cls_tokens, x), dim=1)
+
+            if self.VPT_type == "Deep":
+
+                Prompt_Token_num = self.Prompt_Tokens.shape[1]
+
+                for i in range(len(self.blocks)):
+                    # concatenate Prompt_Tokens
+                    Prompt_Tokens = self.Prompt_Tokens[i].unsqueeze(0)
+                    # firstly concatenate
+                    x = torch.cat((x, Prompt_Tokens.expand(x.shape[0], -1, -1)), dim=1)
+                    num_tokens = x.shape[1]
+                    # lastly remove, a good trick
+                    x = self.blocks[i](x)[:, :num_tokens - Prompt_Token_num]
+
+            else:  # self.VPT_type == "Shallow"
+                Prompt_Token_num = self.Prompt_Tokens.shape[1]
+
+                # concatenate Prompt_Tokens
+                Prompt_Tokens = self.Prompt_Tokens.expand(x.shape[0], -1, -1)
+                x = torch.cat((x, Prompt_Tokens), dim=1)
+                num_tokens = x.shape[1]
+                # a whole sequential process
+                x = self.blocks(x)[:, :num_tokens - Prompt_Token_num]
+
+        # last norm of Transformer
+        x = self.norm(x)
+
+        CLS_token = x[:, :1, :]
+        x = x[:, 1:, :]
+
+        # Encoder output: encoded tokens, CLS token, embedded original puzzle (for illustration)
+        return x, CLS_token, embed_puzzle
+
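+    # forward_encoder thus yields the encoded patch tokens [B, num_patches, D],
+    # the CLS token [B, 1, D], and a detached copy of the pre-Transformer patch
+    # embeddings (embed_puzzle), which later supplies the position-hint tokens
+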
+    def forward_decoder(self, x):
+        """
+        Decoder to reconstruct the puzzle image
+        [B, 1 + num_patches, D_Encoder] -> [B, 1 + num_patches, D_Decoder] -> [B, num_patches, p*p*3]
+
+        :param x: [B, 1 + num_patches, D_Encoder], sequence of Tokens (including the cls token)
+
+        :return: Decoder output: reconstructed tokens
+        x: [B, num_patches, patch_size ** 2 * in_chans], sequence of Patch Tokens
+        """
+
+        if self.decoder is None:
+            # embed tokens: [B, num_encoded_tokens, D_Encoder] -> [B, num_encoded_tokens, D_Decoder]
+            x = self.decoder_embed(x)
+            # print(x.shape)
+            # add pos embed
+            x = x + self.decoder_pos_embed
+
+            # apply Transformer blocks
+            for blk in self.decoder_blocks:
+                x = blk(x)
+            x = self.decoder_norm(x)
+
+            # Reconstruction projection
+            x = self.decoder_pred(x)
+            # remove cls token
+            x = x[:, 1:, :]
+            # print("x shape: ", x.shape)  # [B, N, p*p*3]
+
+        else:
+            # remove cls token
+            x = x[:, 1:, :]
+            # embed tokens: [B, num_encoded_tokens, D_Encoder] -> [B, num_encoded_tokens, D_Decoder]
+            x = self.decoder_embed(x)
+            # unpatchify to make image form [B, H, W, C]
+            x = self.unpatchify(x)  # restore image by Encoder
+            # apply decoder module to segment the output of encoder
+            x = self.decoder(x)  # one-hot seg decoder [B, CLS, H, W]
+            # the output of segmentation is transformed to [B, N, Dec]
+            x = self.patchify_decoder(x)  # TODO: design something more meaningful here
+            # Convert the number of channels to match the image for the loss function
+            x = self.decoder_pred(x)  # [B, N, Dec] -> [B, N, p*p*3]
+            # print(x.shape)
+
+        return x
+
+    def forward_loss(self, imgs, pred, mask):
+        """
+        MSE loss for all patches towards the ori image
+
+        Input:
+        imgs: [B, 3, H, W], Encoder input image
+        pred: [B, num_patches, p*p*3], Decoder reconstructed image
+        mask: [B, num_patches, p*p*3], 0 is keep, 1 is puzzled
+
+        """
+        # print("pred shape: ", pred.shape)  # [64, 196, 768]
+        # target imgs: [B, 3, H, W] -> [B, num_patches, p*p*3]
+        target = self.patchify(imgs)
+        # print("target shape: ", target.shape)  # [64, 196, 768]
+        # use mask as a patch indicator [B, num_patches, D] -> [B, num_patches]
+        mask = mask[:, :, 0]  # Binary mask, 1 for removed patches, 0 for reserved patches:
+
+        if self.norm_pix_loss:  # Normalize the target image patches
+            mean = target.mean(dim=-1, keepdim=True)
+            var = target.var(dim=-1, keepdim=True)
+            target = (target - mean) / (var + 1.e-6) ** .5
+
+        # MSE loss
+        loss = (pred - target) ** 2
+        loss = loss.mean(dim=-1)  # [B, num_patches], mean loss on each patch pixel
+
+        loss = (loss * mask).sum() / mask.sum()  # mean loss on removed patches [B], scalar
+
+        return loss
+
+    def forward(self, imgs, fix_position_ratio=0.25, puzzle_patch_size=32, combined_pred_illustration=False):
+        # STEP 1: Puzzle making
+        # create puzzle images: [B, 3, H, W]
+        imgs_puzzled, mask = self.forward_puzzle(imgs, fix_position_ratio, puzzle_patch_size)
+
+        # Visualization of imgs_puzzled_patches sequence: [B, num_patches, p*p*3]
+        imgs_puzzled_patches = self.patchify(imgs_puzzled)
+        # here, latent crop size is automatically based on encoder embedding
+
+        # STEP 2: Puzzle understanding
+        # Encoder to obtain latent tokens and embed_puzzle: [B, num_patches, D]
+        latent_puzzle, CLS_token, embed_puzzle = self.forward_encoder(imgs_puzzled)
+        # would VPT output more tokens? currently the firstly-cat-lastly-remove trick is used, so it's fine
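+        # worked example (a sketch, ViT-B/16 on 224x224 inputs with the defaults):
+        # puzzle_patch_size=32 gives 49 puzzle patches; fix_position_ratio=0.25
+        # keeps int(49 * 0.25) = 12 of them in place and batch-shuffles the other 37,
+        # i.e. 48 fixed (hint) and 148 shuffled (relation) of the 196 encoder patches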
+
+        # STEP 3: Puzzle restoring
+
+        # step 3.(a) prepare the decoder input indicator mask at the encoder output stage:
+        mask_patches_pp3 = self.patchify(mask)  # mark relation tokens with 1 [B, num_patches, p*p*3]
+        # here, latent crop size is automatically based on encoder embedding
+
+        # Reassign mask indicator shape to the encoder output dim
+        if mask_patches_pp3.shape[-1] != latent_puzzle.shape[-1]:
+            # [B, num_patches, p*p*3] -> [B, num_patches, 1] -> [B, num_patches, D]
+            mask_patches = mask_patches_pp3[:, :, :1].expand(-1, -1, latent_puzzle.shape[-1])
+        else:
+            mask_patches = mask_patches_pp3
+
+        # anti_mask: [B, num_patches, D], binary mask indicating fix position with 1 instead of 0
+        anti_mask = mask_patches * -1 + 1  # great trick to process the positional operation with less calculation
+
+        # Position hint
+        # in mask, 0 is Position Tokens, therefore take only Relation Tokens
+        latent_tokens = latent_puzzle * mask_patches  # take out relation tokens (latent_tokens here)
+        # in anti_mask, 0 is Relation Tokens, therefore take only Position Tokens
+        hint_tokens = embed_puzzle * anti_mask  # anti_mask to take hint_tokens (position tokens)
+        # group decoder tokens: [B, num_patches, D]
+        latent = latent_tokens + hint_tokens
+        # append back the cls token at the first -> [B, 1+num_patches, D]
+        x = torch.cat([CLS_token, latent], dim=1)
+
+        # step 3.(b) Decoder to obtain Reconstructed image patches:
+        # [B, 1+num_patches,D] -> [B, 1+num_patches, D_Decoder] -> [B, num_patches, p*p*3]
+        pred = self.forward_decoder(x)
+
+        # combined pred
+        anti_mask_patches_pp3 = mask_patches_pp3 * -1 + 1  # fix position with 1, relation patches with 0
+        hint_img_patches = imgs_puzzled_patches * anti_mask_patches_pp3
+        pred_img_patches = pred * mask_patches_pp3  # mark relation tokens with 1, fix position with 0
+        pred_with_hint_imgs = hint_img_patches + pred_img_patches
+
+        # MSE loss for all patches towards the ori image
+        loss = self.forward_loss(imgs, pred, mask_patches)
+        # print(loss)  # check whether the loss is working
+
+        if combined_pred_illustration:
+            return loss, pred_with_hint_imgs, imgs_puzzled_patches
+        else:
+            return loss, pred, imgs_puzzled_patches
+
+
+def sae_vit_base_patch16_dec512d8b(dec_idx=None, **kwargs):
+    print("Decoder:", dec_idx)
+
+    model = ShuffledAutoEncoderViT(
+        patch_size=16, embed_dim=768, depth=12, num_heads=12,
+        decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16,
+        mlp_ratio=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
+    return model
+
+
+def sae_vit_large_patch16_dec512d8b(dec_idx=None, **kwargs):
+    print("Decoder:", dec_idx)
+
+    model = ShuffledAutoEncoderViT(
+        patch_size=16, embed_dim=1024, depth=24, num_heads=16,
+        decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16,
+        mlp_ratio=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
+    return model
+
+
+def sae_vit_huge_patch14_dec512d8b(dec_idx=None, **kwargs):
+    print("Decoder:", dec_idx)
+
+    model = ShuffledAutoEncoderViT(
+        patch_size=14, embed_dim=1280, depth=32, num_heads=16,
+        decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16,
+        mlp_ratio=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
+    return model
+
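+# a quick shape check (a sketch; sae_vit_base_patch16 is aliased further below):
+#     model = sae_vit_base_patch16(img_size=224, decoder=None)
+#     loss, pred, puzzled = model(torch.rand(2, 3, 224, 224))
+#     # pred: [2, 196, 768], i.e. 196 patches of 16*16*3 values each
+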
+
+# decoder
+def sae_vit_base_patch16_dec(dec_idx=None, num_classes=3, img_size=224, **kwargs):
+    # num_classes gives a one-hot segmentation rather than a reconstruction; we still need to
+    # design how to do the reconstruction so that this can serve pre-training
+
+    if dec_idx == 'swin_unet':
+        decoder_embed_dim = 768
+        decoder_rep_dim = 16 * 16 * 3
+
+        from SSL_structures.Swin_Unet_main.networks.vision_transformer import SwinUnet as ViT_seg
+        decoder = ViT_seg(num_classes=num_classes, img_size=img_size, patch_size=16)
+
+    elif dec_idx == 'transunet':
+        decoder_embed_dim = 768
+        decoder_rep_dim = 16 * 16 * 3
+
+        transunet_name = 'R50-ViT-B_16'
+        transunet_patches_size = 16
+        from SSL_structures.TransUNet_main.networks.vit_seg_modeling import CONFIGS as CONFIGS_Transunet_seg
+        from SSL_structures.TransUNet_main.networks.vit_seg_modeling import VisionTransformer as Transunet_seg
+
+        config_vit = CONFIGS_Transunet_seg[transunet_name]
+        config_vit.n_classes = num_classes
+        config_vit.n_skip = 3
+
+        if transunet_name.find('R50') != -1:
+            config_vit.patches.grid = (
+                int(img_size / transunet_patches_size), int(img_size / transunet_patches_size))
+        decoder = Transunet_seg(config_vit, num_classes=config_vit.n_classes)
+
+    elif dec_idx == 'UTNetV2':
+        decoder_embed_dim = 768
+        decoder_rep_dim = 16 * 16 * 3
+
+        from SSL_structures.UtnetV2.utnetv2 import UTNetV2 as UTNetV2_seg
+        decoder = UTNetV2_seg(in_chan=3, num_classes=num_classes)
+
+    else:
+        print('no effective decoder!')
+        return -1
+
+    print('dec_idx: ', dec_idx)
+
+    model = ShuffledAutoEncoderViT(
+        patch_size=16, embed_dim=768, depth=12, num_heads=12,
+        decoder_embed_dim=decoder_embed_dim, decoder_depth=8, decoder_num_heads=16,
+        mlp_ratio=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), decoder_rep_dim=decoder_rep_dim, decoder=decoder,
+        **kwargs)
+    return model
+
+
+def sae_vit_large_patch16_dec(dec_idx=None, num_classes=3, img_size=224, **kwargs):
+    # num_classes gives a one-hot segmentation rather than a reconstruction; we still need to
+    # design how to do the reconstruction so that this can serve pre-training
+
+    if dec_idx == 'swin_unet':
+        decoder_embed_dim = 768
+        decoder_rep_dim = 16 * 16 * 3
+
+        from SSL_structures.Swin_Unet_main.networks.vision_transformer import SwinUnet as ViT_seg
+        decoder = ViT_seg(num_classes=num_classes, img_size=img_size, patch_size=16)
+
+    elif dec_idx == 'transunet':
+        decoder_embed_dim = 768
+        decoder_rep_dim = 16 * 16 * 3
+
+        transunet_name = 'R50-ViT-B_16'
+        transunet_patches_size = 16
+        from SSL_structures.TransUNet_main.networks.vit_seg_modeling import CONFIGS as CONFIGS_Transunet_seg
+        from SSL_structures.TransUNet_main.networks.vit_seg_modeling import VisionTransformer as Transunet_seg
+
+        config_vit = CONFIGS_Transunet_seg[transunet_name]
+        config_vit.n_classes = num_classes
+        config_vit.n_skip = 3
+
+        if transunet_name.find('R50') != -1:
+            config_vit.patches.grid = (
+                int(img_size / transunet_patches_size), int(img_size / transunet_patches_size))
+        decoder = Transunet_seg(config_vit, num_classes=config_vit.n_classes)
+
+    elif dec_idx == 'UTNetV2':
+        decoder_embed_dim = 768
+        decoder_rep_dim = 16 * 16 * 3
+
+        from SSL_structures.UtnetV2.utnetv2 import UTNetV2 as UTNetV2_seg
+        decoder = UTNetV2_seg(in_chan=3, num_classes=num_classes)
+
+    else:
+        print('no effective decoder!')
+        return -1
+
+    print('dec_idx: ', dec_idx)
+
+    model = ShuffledAutoEncoderViT(
+        patch_size=16, embed_dim=1024, depth=24, num_heads=16,
+        decoder_embed_dim=decoder_embed_dim, decoder_depth=8, decoder_num_heads=16,
+        mlp_ratio=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), decoder_rep_dim=decoder_rep_dim, decoder=decoder,
+        **kwargs)
+    return model
+
+
+def sae_vit_huge_patch14_dec(dec_idx=None, num_classes=3, img_size=224, **kwargs):
+    # num_classes gives a one-hot segmentation rather than a reconstruction; we still need to
+    # design how to do the reconstruction so that this can serve pre-training
+
+    if dec_idx == 'swin_unet':
+        decoder_embed_dim = 14 * 14 * 3
+        decoder_rep_dim = 14 * 14 * 3
+
+        from SSL_structures.Swin_Unet_main.networks.vision_transformer import SwinUnet as ViT_seg
+        decoder = ViT_seg(num_classes=num_classes, img_size=img_size, patch_size=16)
+
+    elif dec_idx == 'transunet':
+        decoder_embed_dim = 14 * 14 * 3
+        decoder_rep_dim = 14 * 14 * 3
+
+        transunet_name = 'R50-ViT-B_16'
+        transunet_patches_size = 16
+        from SSL_structures.TransUNet_main.networks.vit_seg_modeling import CONFIGS as CONFIGS_Transunet_seg
+        from SSL_structures.TransUNet_main.networks.vit_seg_modeling import VisionTransformer as Transunet_seg
+
+        config_vit = CONFIGS_Transunet_seg[transunet_name]
+        config_vit.n_classes = num_classes
+        config_vit.n_skip = 3
+
+        if transunet_name.find('R50') != -1:
+            config_vit.patches.grid = (
+                int(img_size / transunet_patches_size), int(img_size / transunet_patches_size))
+        decoder = Transunet_seg(config_vit, num_classes=config_vit.n_classes)
+
+    elif dec_idx == 'UTNetV2':
+        decoder_embed_dim = 14 * 14 * 3
+        decoder_rep_dim = 14 * 14 * 3
+
+        from SSL_structures.UtnetV2.utnetv2 import UTNetV2 as UTNetV2_seg
+        decoder = UTNetV2_seg(in_chan=3, num_classes=num_classes)
+
+    else:
+        print('no effective decoder!')
+        return -1
+
+    print('dec_idx: ', dec_idx)
+
+    model = ShuffledAutoEncoderViT(
+        patch_size=14, embed_dim=1280, depth=32, num_heads=16,
+        decoder_embed_dim=decoder_embed_dim, decoder_depth=8, decoder_num_heads=16,
+        mlp_ratio=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), decoder_rep_dim=decoder_rep_dim, decoder=decoder,
+        **kwargs)
+    return model
+
+
+# set recommended archs following MAE
+sae_vit_base_patch16 = sae_vit_base_patch16_dec512d8b  # decoder: 512 dim, 8 blocks
+sae_vit_large_patch16 = sae_vit_large_patch16_dec512d8b  # decoder: 512 dim, 8 blocks
+sae_vit_huge_patch14 = sae_vit_huge_patch14_dec512d8b  # decoder: 512 dim, 8 blocks
+
+# Equipped with decoders
+sae_vit_base_patch16_decoder = sae_vit_base_patch16_dec  # decoder: 768 dim, HYF decoders
+sae_vit_large_patch16_decoder = sae_vit_large_patch16_dec  # decoder: 768 dim, HYF decoders
+sae_vit_huge_patch14_decoder = sae_vit_huge_patch14_dec  # decoder: 768 dim, HYF decoders
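+
+# note: the *_decoder variants above attach a segmentation network (SwinUnet,
+# TransUNet or UTNetV2) as the decoder, while the plain variants use the
+# MAE-style 8-block Transformer decoder built inside ShuffledAutoEncoderViT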
+
+if __name__ == '__main__':
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    img_size = 224
+
+    '''
+    num_classes = 3  # set to 3 for 3 channel
+    x = torch.rand(2, 3, img_size, img_size, device=device)
+    '''
+
+    image_tensor_path = './temp-tensors/color.pt'
+    x = torch.load(image_tensor_path)
+    x = x.to(device)  # tensor.to() is not in-place, so the result must be reassigned
+
+    # model = sae_vit_base_patch16(img_size=img_size, decoder=None)
+    # model = sae_vit_huge_patch14(img_size=img_size, decoder=None)
+    # model = sae_vit_base_patch16_decoder(prompt_mode="Deep", dec_idx='swin_unet', img_size=img_size)
+    model = sae_vit_base_patch16(img_size=img_size, decoder=None, group_shuffle_size=2)
+
+    '''
+    # ViT_Prompt
+
+    from pprint import pprint
+    model_names = timm.list_models('*vit*')
+    pprint(model_names)
+
+    basic_model = timm.create_model('vit_base_patch' + str(16) + '_' + str(edge_size), pretrained=True)
+
+    basic_state_dict = basic_model.state_dict()
+
+    model = sae_vit_base_patch16(img_size=384, prompt_mode='Deep', Prompt_Token_num=20, basic_state_dict=basic_state_dict)
+
+    prompt_state_dict = model.obtain_prompt()
+    VPT = VPT_ViT(img_size=384, VPT_type='Deep', Prompt_Token_num=20, basic_state_dict=basic_state_dict)
+    VPT.load_prompt(prompt_state_dict)
+    VPT.to(device)
+    pred = VPT(x)
+    print(pred)
+    '''
+
+    model.to(device)
+
+    loss, pred, imgs_puzzled_patches = model(x, fix_position_ratio=0.25, puzzle_patch_size=32,
+                                             combined_pred_illustration=True)
+    # combined_pred_illustration=True adds the hint patches to pred; False returns the raw prediction
+
+
+    # visualize to check the effect
+    from utils.visual_usage import *
+
+    imgs_puzzled_batch = unpatchify(imgs_puzzled_patches, patch_size=16)
+    for img_idx in range(len(imgs_puzzled_batch)):
+        puzzled_img = imgs_puzzled_batch.cpu()[img_idx]
+        puzzled_img = ToPILImage()(puzzled_img)
+        puzzled_img.save(os.path.join('./temp-figs/', 'puzzled_sample_'+str(img_idx)+'.jpg'))
+
+        recons_img_batch = unpatchify(pred, patch_size=16)
+        recons_img = recons_img_batch.cpu()[img_idx]
+        recons_img = ToPILImage()(recons_img)
+        recons_img.save(os.path.join('./temp-figs/', 'recons_sample_'+str(img_idx)+'.jpg'))
+    '''
+
+    print(loss, '\n')
+
+    print(loss.shape, '\n')
+
+    print(pred.shape, '\n')
+
+    print(imgs_puzzled_patches.shape, '\n')
+    '''
\ No newline at end of file
diff --git a/PuzzleTuning/SSL_structures/Swin_Unet_main/README.md b/PuzzleTuning/SSL_structures/Swin_Unet_main/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..5c549eff9061a3be6aca484330161eff3acc5dd9
--- /dev/null
+++ b/PuzzleTuning/SSL_structures/Swin_Unet_main/README.md
@@ -0,0 +1,46 @@
+# Swin-Unet
+The codes for the work "Swin-Unet: Unet-like Pure Transformer for Medical Image Segmentation"(https://arxiv.org/abs/2105.05537). A validation for U-shaped Swin Transformer.
+
+## 1. Download pre-trained swin transformer model (Swin-T)
+* [Get pre-trained model in this link](https://drive.google.com/drive/folders/1UC3XOoezeum0uck4KBVGa8osahs6rKUY?usp=sharing): Put pretrained Swin-T into folder "pretrained_ckpt/"
+
+## 2. Prepare data
+
+- The datasets we used are provided by TransUnet's authors. Please go to ["./datasets/README.md"](datasets/README.md) for details, or please send an Email to jienengchen01 AT gmail.com to request the preprocessed data. If you would like to use the preprocessed data, please use it for research purposes and do not redistribute it (following the TransUnet's License).
+
+## 3. Environment
+
+- Please prepare an environment with python=3.7, and then use the command "pip install -r requirements.txt" for the dependencies.
+
+## 4. Train/Test
+
+- Run the train script on the Synapse dataset. The batch size we used is 24. If you do not have enough GPU memory, the batch size can be reduced to 12 or 6 to save memory, as in the example below.
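+
+```bash
+# a reduced-memory variant (a sketch; "your DATA_DIR" / "your OUT_DIR" are placeholders as in the commands below)
+python train.py --dataset Synapse --cfg configs/swin_tiny_patch4_window7_224_lite.yaml --root_path your DATA_DIR --max_epochs 150 --output_dir your OUT_DIR --img_size 224 --base_lr 0.05 --batch_size 12
+```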
+ +- Train + +```bash +sh train.sh or python train.py --dataset Synapse --cfg configs/swin_tiny_patch4_window7_224_lite.yaml --root_path your DATA_DIR --max_epochs 150 --output_dir your OUT_DIR --img_size 224 --base_lr 0.05 --batch_size 24 +``` + +- Test + +```bash +sh test.sh or python test.py --dataset Synapse --cfg configs/swin_tiny_patch4_window7_224_lite.yaml --is_saveni --volume_path your DATA_DIR --output_dir your OUT_DIR --max_epoch 150 --base_lr 0.05 --img_size 224 --batch_size 24 +``` + +## References +* [TransUnet](https://github.com/Beckschen/TransUNet) +* [SwinTransformer](https://github.com/microsoft/Swin-Transformer) + +## Citation + +```bibtex +@misc{cao2021swinunet, + title={Swin-Unet: Unet-like Pure Transformer for Medical Image Segmentation}, + author={Hu Cao and Yueyue Wang and Joy Chen and Dongsheng Jiang and Xiaopeng Zhang and Qi Tian and Manning Wang}, + year={2021}, + eprint={2105.05537}, + archivePrefix={arXiv}, + primaryClass={eess.IV} +} +``` diff --git a/PuzzleTuning/SSL_structures/Swin_Unet_main/config.py b/PuzzleTuning/SSL_structures/Swin_Unet_main/config.py new file mode 100644 index 0000000000000000000000000000000000000000..35bf199689555f3fc61fbee04daa2331f7efc181 --- /dev/null +++ b/PuzzleTuning/SSL_structures/Swin_Unet_main/config.py @@ -0,0 +1,229 @@ +# -------------------------------------------------------- +# Swin Transformer +# Copyright (c) 2021 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Ze Liu +# --------------------------------------------------------' + +import os +import yaml +from yacs.config import CfgNode as CN + +_C = CN() + +# Base config files +_C.BASE = [''] + +# ----------------------------------------------------------------------------- +# Data settings +# ----------------------------------------------------------------------------- +_C.DATA = CN() +# Batch size for a single GPU, could be overwritten by command line argument +_C.DATA.BATCH_SIZE = 128 +# Path to dataset, could be overwritten by command line argument +_C.DATA.DATA_PATH = '' +# Dataset name +_C.DATA.DATASET = 'imagenet' +# Input image size +_C.DATA.IMG_SIZE = 224 +# Interpolation to resize image (random, bilinear, bicubic) +_C.DATA.INTERPOLATION = 'bicubic' +# Use zipped dataset instead of folder dataset +# could be overwritten by command line argument +_C.DATA.ZIP_MODE = False +# Cache Data in Memory, could be overwritten by command line argument +_C.DATA.CACHE_MODE = 'part' +# Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU. 
+_C.DATA.PIN_MEMORY = True +# Number of data loading threads +_C.DATA.NUM_WORKERS = 8 + +# ----------------------------------------------------------------------------- +# Model settings +# ----------------------------------------------------------------------------- +_C.MODEL = CN() +# Model type +_C.MODEL.TYPE = 'swin' +# Model name +_C.MODEL.NAME = 'swin_tiny_patch4_window7_224' +# Checkpoint to resume, could be overwritten by command line argument +_C.MODEL.PRETRAIN_CKPT = './pretrained_ckpt/swin_tiny_patch4_window7_224.pth' +_C.MODEL.RESUME = '' +# Number of classes, overwritten in data preparation +_C.MODEL.NUM_CLASSES = 1000 +# Dropout rate +_C.MODEL.DROP_RATE = 0.0 +# Drop path rate +_C.MODEL.DROP_PATH_RATE = 0.1 +# Label Smoothing +_C.MODEL.LABEL_SMOOTHING = 0.1 + +# Swin Transformer parameters +_C.MODEL.SWIN = CN() +_C.MODEL.SWIN.PATCH_SIZE = 4 +_C.MODEL.SWIN.IN_CHANS = 3 +_C.MODEL.SWIN.EMBED_DIM = 96 +_C.MODEL.SWIN.DEPTHS = [2, 2, 6, 2] +_C.MODEL.SWIN.DECODER_DEPTHS = [2, 2, 6, 2] +_C.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24] +_C.MODEL.SWIN.WINDOW_SIZE = 7 +_C.MODEL.SWIN.MLP_RATIO = 4. +_C.MODEL.SWIN.QKV_BIAS = True +_C.MODEL.SWIN.QK_SCALE = None +_C.MODEL.SWIN.APE = False +_C.MODEL.SWIN.PATCH_NORM = True +_C.MODEL.SWIN.FINAL_UPSAMPLE= "expand_first" + +# ----------------------------------------------------------------------------- +# Training settings +# ----------------------------------------------------------------------------- +_C.TRAIN = CN() +_C.TRAIN.START_EPOCH = 0 +_C.TRAIN.EPOCHS = 300 +_C.TRAIN.WARMUP_EPOCHS = 20 +_C.TRAIN.WEIGHT_DECAY = 0.05 +_C.TRAIN.BASE_LR = 5e-4 +_C.TRAIN.WARMUP_LR = 5e-7 +_C.TRAIN.MIN_LR = 5e-6 +# Clip gradient norm +_C.TRAIN.CLIP_GRAD = 5.0 +# Auto resume from latest checkpoint +_C.TRAIN.AUTO_RESUME = True +# Gradient accumulation steps +# could be overwritten by command line argument +_C.TRAIN.ACCUMULATION_STEPS = 0 +# Whether to use gradient checkpointing to save memory +# could be overwritten by command line argument +_C.TRAIN.USE_CHECKPOINT = False + +# LR scheduler +_C.TRAIN.LR_SCHEDULER = CN() +_C.TRAIN.LR_SCHEDULER.NAME = 'cosine' +# Epoch interval to decay LR, used in StepLRScheduler +_C.TRAIN.LR_SCHEDULER.DECAY_EPOCHS = 30 +# LR decay rate, used in StepLRScheduler +_C.TRAIN.LR_SCHEDULER.DECAY_RATE = 0.1 + +# Optimizer +_C.TRAIN.OPTIMIZER = CN() +_C.TRAIN.OPTIMIZER.NAME = 'adamw' +# Optimizer Epsilon +_C.TRAIN.OPTIMIZER.EPS = 1e-8 +# Optimizer Betas +_C.TRAIN.OPTIMIZER.BETAS = (0.9, 0.999) +# SGD momentum +_C.TRAIN.OPTIMIZER.MOMENTUM = 0.9 + +# ----------------------------------------------------------------------------- +# Augmentation settings +# ----------------------------------------------------------------------------- +_C.AUG = CN() +# Color jitter factor +_C.AUG.COLOR_JITTER = 0.4 +# Use AutoAugment policy. "v0" or "original" +_C.AUG.AUTO_AUGMENT = 'rand-m9-mstd0.5-inc1' +# Random erase prob +_C.AUG.REPROB = 0.25 +# Random erase mode +_C.AUG.REMODE = 'pixel' +# Random erase count +_C.AUG.RECOUNT = 1 +# Mixup alpha, mixup enabled if > 0 +_C.AUG.MIXUP = 0.8 +# Cutmix alpha, cutmix enabled if > 0 +_C.AUG.CUTMIX = 1.0 +# Cutmix min/max ratio, overrides alpha and enables cutmix if set +_C.AUG.CUTMIX_MINMAX = None +# Probability of performing mixup or cutmix when either/both is enabled +_C.AUG.MIXUP_PROB = 1.0 +# Probability of switching to cutmix when both mixup and cutmix enabled +_C.AUG.MIXUP_SWITCH_PROB = 0.5 +# How to apply mixup/cutmix params. 
Per "batch", "pair", or "elem" +_C.AUG.MIXUP_MODE = 'batch' + +# ----------------------------------------------------------------------------- +# Testing settings +# ----------------------------------------------------------------------------- +_C.TEST = CN() +# Whether to use center crop when testing +_C.TEST.CROP = True + +# ----------------------------------------------------------------------------- +# Misc +# ----------------------------------------------------------------------------- +# Mixed precision opt level, if O0, no amp is used ('O0', 'O1', 'O2') +# overwritten by command line argument +_C.AMP_OPT_LEVEL = '' +# Path to output folder, overwritten by command line argument +_C.OUTPUT = '' +# Tag of experiment, overwritten by command line argument +_C.TAG = 'default' +# Frequency to save checkpoint +_C.SAVE_FREQ = 1 +# Frequency to logging info +_C.PRINT_FREQ = 10 +# Fixed random seed +_C.SEED = 0 +# Perform evaluation only, overwritten by command line argument +_C.EVAL_MODE = False +# Test throughput only, overwritten by command line argument +_C.THROUGHPUT_MODE = False +# local rank for DistributedDataParallel, given by command line argument +_C.LOCAL_RANK = 0 + + +def _update_config_from_file(config, cfg_file): + config.defrost() + with open(cfg_file, 'r') as f: + yaml_cfg = yaml.load(f, Loader=yaml.FullLoader) + + for cfg in yaml_cfg.setdefault('BASE', ['']): + if cfg: + _update_config_from_file( + config, os.path.join(os.path.dirname(cfg_file), cfg) + ) + print('=> merge config from {}'.format(cfg_file)) + config.merge_from_file(cfg_file) + config.freeze() + + +def update_config(config, args): + _update_config_from_file(config, args.cfg) + + config.defrost() + if args.opts: + config.merge_from_list(args.opts) + + # merge from specific arguments + if args.batch_size: + config.DATA.BATCH_SIZE = args.batch_size + if args.zip: + config.DATA.ZIP_MODE = True + if args.cache_mode: + config.DATA.CACHE_MODE = args.cache_mode + if args.resume: + config.MODEL.RESUME = args.resume + if args.accumulation_steps: + config.TRAIN.ACCUMULATION_STEPS = args.accumulation_steps + if args.use_checkpoint: + config.TRAIN.USE_CHECKPOINT = True + if args.amp_opt_level: + config.AMP_OPT_LEVEL = args.amp_opt_level + if args.tag: + config.TAG = args.tag + if args.eval: + config.EVAL_MODE = True + if args.throughput: + config.THROUGHPUT_MODE = True + + config.freeze() + + +def get_config(args): + """Get a yacs CfgNode object with default values.""" + # Return a clone so that the defaults will not be altered + # This is for the "local variable" use pattern + config = _C.clone() + update_config(config, args) + + return config diff --git a/PuzzleTuning/SSL_structures/Swin_Unet_main/configs/swin_tiny_patch4_window7_224_lite.yaml b/PuzzleTuning/SSL_structures/Swin_Unet_main/configs/swin_tiny_patch4_window7_224_lite.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e25bd2b1578132dd205dd7be6d46eb56376a0b07 --- /dev/null +++ b/PuzzleTuning/SSL_structures/Swin_Unet_main/configs/swin_tiny_patch4_window7_224_lite.yaml @@ -0,0 +1,12 @@ +MODEL: + TYPE: swin + NAME: swin_tiny_patch4_window7_224 + DROP_PATH_RATE: 0.2 + PRETRAIN_CKPT: "./pretrained_ckpt/swin_tiny_patch4_window7_224.pth" + SWIN: + FINAL_UPSAMPLE: "expand_first" + EMBED_DIM: 96 + DEPTHS: [ 2, 2, 2, 2 ] + DECODER_DEPTHS: [ 2, 2, 2, 1] + NUM_HEADS: [ 3, 6, 12, 24 ] + WINDOW_SIZE: 7 \ No newline at end of file diff --git a/PuzzleTuning/SSL_structures/Swin_Unet_main/datasets/README.md 
b/PuzzleTuning/SSL_structures/Swin_Unet_main/datasets/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c662f8e2c24f9b5a899338cfe5fcd67f43d1bba5 --- /dev/null +++ b/PuzzleTuning/SSL_structures/Swin_Unet_main/datasets/README.md @@ -0,0 +1,29 @@ +# Data Preparing + +1. Access to the synapse multi-organ dataset: + 1. Sign up in the [official Synapse website](https://www.synapse.org/#!Synapse:syn3193805/wiki/) and download the dataset. Convert them to numpy format, clip the images within [-125, 275], normalize each 3D image to [0, 1], and extract 2D slices from 3D volume for training cases while keeping the 3D volume in h5 format for testing cases. + 2. You can also send an Email directly to jienengchen01 AT gmail.com to request the preprocessed data for reproduction. +2. The directory structure of the whole project is as follows: + +```bash +. +├── TransUNet +│   ├──datasets +│   │    └── dataset_*.py +│   ├──train.py +│   ├──test.py +│   └──... +├── model +│   └── vit_checkpoint +│   └── imagenet21k +│      ├── R50+ViT-B_16.npz +│      └── *.npz +└── data + └──Synapse + ├── test_vol_h5 + │   ├── case0001.npy.h5 + │   └── *.npy.h5 + └── train_npz + ├── case0005_slice000.npz + └── *.npz +``` diff --git a/PuzzleTuning/SSL_structures/Swin_Unet_main/datasets/dataset_synapse.py b/PuzzleTuning/SSL_structures/Swin_Unet_main/datasets/dataset_synapse.py new file mode 100644 index 0000000000000000000000000000000000000000..c5d0de1a99f8ca46851f51e45570d4ddc8fbff09 --- /dev/null +++ b/PuzzleTuning/SSL_structures/Swin_Unet_main/datasets/dataset_synapse.py @@ -0,0 +1,75 @@ +import os +import random +import h5py +import numpy as np +import torch +from scipy import ndimage +from scipy.ndimage.interpolation import zoom +from torch.utils.data import Dataset + + +def random_rot_flip(image, label): + k = np.random.randint(0, 4) + image = np.rot90(image, k) + label = np.rot90(label, k) + axis = np.random.randint(0, 2) + image = np.flip(image, axis=axis).copy() + label = np.flip(label, axis=axis).copy() + return image, label + + +def random_rotate(image, label): + angle = np.random.randint(-20, 20) + image = ndimage.rotate(image, angle, order=0, reshape=False) + label = ndimage.rotate(label, angle, order=0, reshape=False) + return image, label + + +class RandomGenerator(object): + def __init__(self, output_size): + self.output_size = output_size + + def __call__(self, sample): + image, label = sample['image'], sample['label'] + + if random.random() > 0.5: + image, label = random_rot_flip(image, label) + elif random.random() > 0.5: + image, label = random_rotate(image, label) + x, y = image.shape + if x != self.output_size[0] or y != self.output_size[1]: + image = zoom(image, (self.output_size[0] / x, self.output_size[1] / y), order=3) # why not 3? + label = zoom(label, (self.output_size[0] / x, self.output_size[1] / y), order=0) + image = torch.from_numpy(image.astype(np.float32)).unsqueeze(0) + label = torch.from_numpy(label.astype(np.float32)) + sample = {'image': image, 'label': label.long()} + return sample + + +class Synapse_dataset(Dataset): + def __init__(self, base_dir, list_dir, split, transform=None): + self.transform = transform # using transform in torch! 
+ self.split = split + self.sample_list = open(os.path.join(list_dir, self.split+'.txt')).readlines() + self.data_dir = base_dir + + def __len__(self): + return len(self.sample_list) + + def __getitem__(self, idx): + if self.split == "train": + slice_name = self.sample_list[idx].strip('\n') + data_path = os.path.join(self.data_dir, slice_name+'.npz') + data = np.load(data_path) + image, label = data['image'], data['label'] + else: + vol_name = self.sample_list[idx].strip('\n') + filepath = self.data_dir + "/{}.npy.h5".format(vol_name) + data = h5py.File(filepath) + image, label = data['image'][:], data['label'][:] + + sample = {'image': image, 'label': label} + if self.transform: + sample = self.transform(sample) + sample['case_name'] = self.sample_list[idx].strip('\n') + return sample diff --git a/PuzzleTuning/SSL_structures/Swin_Unet_main/lists/lists_Synapse/all.lst b/PuzzleTuning/SSL_structures/Swin_Unet_main/lists/lists_Synapse/all.lst new file mode 100644 index 0000000000000000000000000000000000000000..6ef047d4b8be2ea61d1621620e420a6f3c974ec2 --- /dev/null +++ b/PuzzleTuning/SSL_structures/Swin_Unet_main/lists/lists_Synapse/all.lst @@ -0,0 +1,30 @@ +case0031.npy.h5 +case0007.npy.h5 +case0009.npy.h5 +case0005.npy.h5 +case0026.npy.h5 +case0039.npy.h5 +case0024.npy.h5 +case0034.npy.h5 +case0033.npy.h5 +case0030.npy.h5 +case0023.npy.h5 +case0040.npy.h5 +case0010.npy.h5 +case0021.npy.h5 +case0006.npy.h5 +case0027.npy.h5 +case0028.npy.h5 +case0037.npy.h5 +case0008.npy.h5 +case0022.npy.h5 +case0038.npy.h5 +case0036.npy.h5 +case0032.npy.h5 +case0002.npy.h5 +case0029.npy.h5 +case0003.npy.h5 +case0001.npy.h5 +case0004.npy.h5 +case0025.npy.h5 +case0035.npy.h5 diff --git a/PuzzleTuning/SSL_structures/Swin_Unet_main/lists/lists_Synapse/test_vol.txt b/PuzzleTuning/SSL_structures/Swin_Unet_main/lists/lists_Synapse/test_vol.txt new file mode 100644 index 0000000000000000000000000000000000000000..1c4abd53044eed5457fd1f7e0cca1c99e7222593 --- /dev/null +++ b/PuzzleTuning/SSL_structures/Swin_Unet_main/lists/lists_Synapse/test_vol.txt @@ -0,0 +1,12 @@ +case0008 +case0022 +case0038 +case0036 +case0032 +case0002 +case0029 +case0003 +case0001 +case0004 +case0025 +case0035 diff --git a/PuzzleTuning/SSL_structures/Swin_Unet_main/lists/lists_Synapse/train.txt b/PuzzleTuning/SSL_structures/Swin_Unet_main/lists/lists_Synapse/train.txt new file mode 100644 index 0000000000000000000000000000000000000000..e58616844994a95407d1f664b79cd4e4533d41b8 --- /dev/null +++ b/PuzzleTuning/SSL_structures/Swin_Unet_main/lists/lists_Synapse/train.txt @@ -0,0 +1,2211 @@ +case0031_slice000 +case0031_slice001 +case0031_slice002 +case0031_slice003 +case0031_slice004 +case0031_slice005 +case0031_slice006 +case0031_slice007 +case0031_slice008 +case0031_slice009 +case0031_slice010 +case0031_slice011 +case0031_slice012 +case0031_slice013 +case0031_slice014 +case0031_slice015 +case0031_slice016 +case0031_slice017 +case0031_slice018 +case0031_slice019 +case0031_slice020 +case0031_slice021 +case0031_slice022 +case0031_slice023 +case0031_slice024 +case0031_slice025 +case0031_slice026 +case0031_slice027 +case0031_slice028 +case0031_slice029 +case0031_slice030 +case0031_slice031 +case0031_slice032 +case0031_slice033 +case0031_slice034 +case0031_slice035 +case0031_slice036 +case0031_slice037 +case0031_slice038 +case0031_slice039 +case0031_slice040 +case0031_slice041 +case0031_slice042 +case0031_slice043 +case0031_slice044 +case0031_slice045 +case0031_slice046 +case0031_slice047 +case0031_slice048 +case0031_slice049 +case0031_slice050 
+case0031_slice051 +case0031_slice052 +case0031_slice053 +case0031_slice054 +case0031_slice055 +case0031_slice056 +case0031_slice057 +case0031_slice058 +case0031_slice059 +case0031_slice060 +case0031_slice061 +case0031_slice062 +case0031_slice063 +case0031_slice064 +case0031_slice065 +case0031_slice066 +case0031_slice067 +case0031_slice068 +case0031_slice069 +case0031_slice070 +case0031_slice071 +case0031_slice072 +case0031_slice073 +case0031_slice074 +case0031_slice075 +case0031_slice076 +case0031_slice077 +case0031_slice078 +case0031_slice079 +case0031_slice080 +case0031_slice081 +case0031_slice082 +case0031_slice083 +case0031_slice084 +case0031_slice085 +case0031_slice086 +case0031_slice087 +case0031_slice088 +case0031_slice089 +case0031_slice090 +case0031_slice091 +case0031_slice092 +case0007_slice000 +case0007_slice001 +case0007_slice002 +case0007_slice003 +case0007_slice004 +case0007_slice005 +case0007_slice006 +case0007_slice007 +case0007_slice008 +case0007_slice009 +case0007_slice010 +case0007_slice011 +case0007_slice012 +case0007_slice013 +case0007_slice014 +case0007_slice015 +case0007_slice016 +case0007_slice017 +case0007_slice018 +case0007_slice019 +case0007_slice020 +case0007_slice021 +case0007_slice022 +case0007_slice023 +case0007_slice024 +case0007_slice025 +case0007_slice026 +case0007_slice027 +case0007_slice028 +case0007_slice029 +case0007_slice030 +case0007_slice031 +case0007_slice032 +case0007_slice033 +case0007_slice034 +case0007_slice035 +case0007_slice036 +case0007_slice037 +case0007_slice038 +case0007_slice039 +case0007_slice040 +case0007_slice041 +case0007_slice042 +case0007_slice043 +case0007_slice044 +case0007_slice045 +case0007_slice046 +case0007_slice047 +case0007_slice048 +case0007_slice049 +case0007_slice050 +case0007_slice051 +case0007_slice052 +case0007_slice053 +case0007_slice054 +case0007_slice055 +case0007_slice056 +case0007_slice057 +case0007_slice058 +case0007_slice059 +case0007_slice060 +case0007_slice061 +case0007_slice062 +case0007_slice063 +case0007_slice064 +case0007_slice065 +case0007_slice066 +case0007_slice067 +case0007_slice068 +case0007_slice069 +case0007_slice070 +case0007_slice071 +case0007_slice072 +case0007_slice073 +case0007_slice074 +case0007_slice075 +case0007_slice076 +case0007_slice077 +case0007_slice078 +case0007_slice079 +case0007_slice080 +case0007_slice081 +case0007_slice082 +case0007_slice083 +case0007_slice084 +case0007_slice085 +case0007_slice086 +case0007_slice087 +case0007_slice088 +case0007_slice089 +case0007_slice090 +case0007_slice091 +case0007_slice092 +case0007_slice093 +case0007_slice094 +case0007_slice095 +case0007_slice096 +case0007_slice097 +case0007_slice098 +case0007_slice099 +case0007_slice100 +case0007_slice101 +case0007_slice102 +case0007_slice103 +case0007_slice104 +case0007_slice105 +case0007_slice106 +case0007_slice107 +case0007_slice108 +case0007_slice109 +case0007_slice110 +case0007_slice111 +case0007_slice112 +case0007_slice113 +case0007_slice114 +case0007_slice115 +case0007_slice116 +case0007_slice117 +case0007_slice118 +case0007_slice119 +case0007_slice120 +case0007_slice121 +case0007_slice122 +case0007_slice123 +case0007_slice124 +case0007_slice125 +case0007_slice126 +case0007_slice127 +case0007_slice128 +case0007_slice129 +case0007_slice130 +case0007_slice131 +case0007_slice132 +case0007_slice133 +case0007_slice134 +case0007_slice135 +case0007_slice136 +case0007_slice137 +case0007_slice138 +case0007_slice139 +case0007_slice140 +case0007_slice141 +case0007_slice142 +case0007_slice143 +case0007_slice144 
+case0007_slice145 +case0007_slice146 +case0007_slice147 +case0007_slice148 +case0007_slice149 +case0007_slice150 +case0007_slice151 +case0007_slice152 +case0007_slice153 +case0007_slice154 +case0007_slice155 +case0007_slice156 +case0007_slice157 +case0007_slice158 +case0007_slice159 +case0007_slice160 +case0007_slice161 +case0007_slice162 +case0009_slice000 +case0009_slice001 +case0009_slice002 +case0009_slice003 +case0009_slice004 +case0009_slice005 +case0009_slice006 +case0009_slice007 +case0009_slice008 +case0009_slice009 +case0009_slice010 +case0009_slice011 +case0009_slice012 +case0009_slice013 +case0009_slice014 +case0009_slice015 +case0009_slice016 +case0009_slice017 +case0009_slice018 +case0009_slice019 +case0009_slice020 +case0009_slice021 +case0009_slice022 +case0009_slice023 +case0009_slice024 +case0009_slice025 +case0009_slice026 +case0009_slice027 +case0009_slice028 +case0009_slice029 +case0009_slice030 +case0009_slice031 +case0009_slice032 +case0009_slice033 +case0009_slice034 +case0009_slice035 +case0009_slice036 +case0009_slice037 +case0009_slice038 +case0009_slice039 +case0009_slice040 +case0009_slice041 +case0009_slice042 +case0009_slice043 +case0009_slice044 +case0009_slice045 +case0009_slice046 +case0009_slice047 +case0009_slice048 +case0009_slice049 +case0009_slice050 +case0009_slice051 +case0009_slice052 +case0009_slice053 +case0009_slice054 +case0009_slice055 +case0009_slice056 +case0009_slice057 +case0009_slice058 +case0009_slice059 +case0009_slice060 +case0009_slice061 +case0009_slice062 +case0009_slice063 +case0009_slice064 +case0009_slice065 +case0009_slice066 +case0009_slice067 +case0009_slice068 +case0009_slice069 +case0009_slice070 +case0009_slice071 +case0009_slice072 +case0009_slice073 +case0009_slice074 +case0009_slice075 +case0009_slice076 +case0009_slice077 +case0009_slice078 +case0009_slice079 +case0009_slice080 +case0009_slice081 +case0009_slice082 +case0009_slice083 +case0009_slice084 +case0009_slice085 +case0009_slice086 +case0009_slice087 +case0009_slice088 +case0009_slice089 +case0009_slice090 +case0009_slice091 +case0009_slice092 +case0009_slice093 +case0009_slice094 +case0009_slice095 +case0009_slice096 +case0009_slice097 +case0009_slice098 +case0009_slice099 +case0009_slice100 +case0009_slice101 +case0009_slice102 +case0009_slice103 +case0009_slice104 +case0009_slice105 +case0009_slice106 +case0009_slice107 +case0009_slice108 +case0009_slice109 +case0009_slice110 +case0009_slice111 +case0009_slice112 +case0009_slice113 +case0009_slice114 +case0009_slice115 +case0009_slice116 +case0009_slice117 +case0009_slice118 +case0009_slice119 +case0009_slice120 +case0009_slice121 +case0009_slice122 +case0009_slice123 +case0009_slice124 +case0009_slice125 +case0009_slice126 +case0009_slice127 +case0009_slice128 +case0009_slice129 +case0009_slice130 +case0009_slice131 +case0009_slice132 +case0009_slice133 +case0009_slice134 +case0009_slice135 +case0009_slice136 +case0009_slice137 +case0009_slice138 +case0009_slice139 +case0009_slice140 +case0009_slice141 +case0009_slice142 +case0009_slice143 +case0009_slice144 +case0009_slice145 +case0009_slice146 +case0009_slice147 +case0009_slice148 +case0005_slice000 +case0005_slice001 +case0005_slice002 +case0005_slice003 +case0005_slice004 +case0005_slice005 +case0005_slice006 +case0005_slice007 +case0005_slice008 +case0005_slice009 +case0005_slice010 +case0005_slice011 +case0005_slice012 +case0005_slice013 +case0005_slice014 +case0005_slice015 +case0005_slice016 +case0005_slice017 +case0005_slice018 +case0005_slice019 
+case0005_slice020 +case0005_slice021 +case0005_slice022 +case0005_slice023 +case0005_slice024 +case0005_slice025 +case0005_slice026 +case0005_slice027 +case0005_slice028 +case0005_slice029 +case0005_slice030 +case0005_slice031 +case0005_slice032 +case0005_slice033 +case0005_slice034 +case0005_slice035 +case0005_slice036 +case0005_slice037 +case0005_slice038 +case0005_slice039 +case0005_slice040 +case0005_slice041 +case0005_slice042 +case0005_slice043 +case0005_slice044 +case0005_slice045 +case0005_slice046 +case0005_slice047 +case0005_slice048 +case0005_slice049 +case0005_slice050 +case0005_slice051 +case0005_slice052 +case0005_slice053 +case0005_slice054 +case0005_slice055 +case0005_slice056 +case0005_slice057 +case0005_slice058 +case0005_slice059 +case0005_slice060 +case0005_slice061 +case0005_slice062 +case0005_slice063 +case0005_slice064 +case0005_slice065 +case0005_slice066 +case0005_slice067 +case0005_slice068 +case0005_slice069 +case0005_slice070 +case0005_slice071 +case0005_slice072 +case0005_slice073 +case0005_slice074 +case0005_slice075 +case0005_slice076 +case0005_slice077 +case0005_slice078 +case0005_slice079 +case0005_slice080 +case0005_slice081 +case0005_slice082 +case0005_slice083 +case0005_slice084 +case0005_slice085 +case0005_slice086 +case0005_slice087 +case0005_slice088 +case0005_slice089 +case0005_slice090 +case0005_slice091 +case0005_slice092 +case0005_slice093 +case0005_slice094 +case0005_slice095 +case0005_slice096 +case0005_slice097 +case0005_slice098 +case0005_slice099 +case0005_slice100 +case0005_slice101 +case0005_slice102 +case0005_slice103 +case0005_slice104 +case0005_slice105 +case0005_slice106 +case0005_slice107 +case0005_slice108 +case0005_slice109 +case0005_slice110 +case0005_slice111 +case0005_slice112 +case0005_slice113 +case0005_slice114 +case0005_slice115 +case0005_slice116 +case0026_slice000 +case0026_slice001 +case0026_slice002 +case0026_slice003 +case0026_slice004 +case0026_slice005 +case0026_slice006 +case0026_slice007 +case0026_slice008 +case0026_slice009 +case0026_slice010 +case0026_slice011 +case0026_slice012 +case0026_slice013 +case0026_slice014 +case0026_slice015 +case0026_slice016 +case0026_slice017 +case0026_slice018 +case0026_slice019 +case0026_slice020 +case0026_slice021 +case0026_slice022 +case0026_slice023 +case0026_slice024 +case0026_slice025 +case0026_slice026 +case0026_slice027 +case0026_slice028 +case0026_slice029 +case0026_slice030 +case0026_slice031 +case0026_slice032 +case0026_slice033 +case0026_slice034 +case0026_slice035 +case0026_slice036 +case0026_slice037 +case0026_slice038 +case0026_slice039 +case0026_slice040 +case0026_slice041 +case0026_slice042 +case0026_slice043 +case0026_slice044 +case0026_slice045 +case0026_slice046 +case0026_slice047 +case0026_slice048 +case0026_slice049 +case0026_slice050 +case0026_slice051 +case0026_slice052 +case0026_slice053 +case0026_slice054 +case0026_slice055 +case0026_slice056 +case0026_slice057 +case0026_slice058 +case0026_slice059 +case0026_slice060 +case0026_slice061 +case0026_slice062 +case0026_slice063 +case0026_slice064 +case0026_slice065 +case0026_slice066 +case0026_slice067 +case0026_slice068 +case0026_slice069 +case0026_slice070 +case0026_slice071 +case0026_slice072 +case0026_slice073 +case0026_slice074 +case0026_slice075 +case0026_slice076 +case0026_slice077 +case0026_slice078 +case0026_slice079 +case0026_slice080 +case0026_slice081 +case0026_slice082 +case0026_slice083 +case0026_slice084 +case0026_slice085 +case0026_slice086 +case0026_slice087 +case0026_slice088 +case0026_slice089 
+case0026_slice090 +case0026_slice091 +case0026_slice092 +case0026_slice093 +case0026_slice094 +case0026_slice095 +case0026_slice096 +case0026_slice097 +case0026_slice098 +case0026_slice099 +case0026_slice100 +case0026_slice101 +case0026_slice102 +case0026_slice103 +case0026_slice104 +case0026_slice105 +case0026_slice106 +case0026_slice107 +case0026_slice108 +case0026_slice109 +case0026_slice110 +case0026_slice111 +case0026_slice112 +case0026_slice113 +case0026_slice114 +case0026_slice115 +case0026_slice116 +case0026_slice117 +case0026_slice118 +case0026_slice119 +case0026_slice120 +case0026_slice121 +case0026_slice122 +case0026_slice123 +case0026_slice124 +case0026_slice125 +case0026_slice126 +case0026_slice127 +case0026_slice128 +case0026_slice129 +case0026_slice130 +case0039_slice000 +case0039_slice001 +case0039_slice002 +case0039_slice003 +case0039_slice004 +case0039_slice005 +case0039_slice006 +case0039_slice007 +case0039_slice008 +case0039_slice009 +case0039_slice010 +case0039_slice011 +case0039_slice012 +case0039_slice013 +case0039_slice014 +case0039_slice015 +case0039_slice016 +case0039_slice017 +case0039_slice018 +case0039_slice019 +case0039_slice020 +case0039_slice021 +case0039_slice022 +case0039_slice023 +case0039_slice024 +case0039_slice025 +case0039_slice026 +case0039_slice027 +case0039_slice028 +case0039_slice029 +case0039_slice030 +case0039_slice031 +case0039_slice032 +case0039_slice033 +case0039_slice034 +case0039_slice035 +case0039_slice036 +case0039_slice037 +case0039_slice038 +case0039_slice039 +case0039_slice040 +case0039_slice041 +case0039_slice042 +case0039_slice043 +case0039_slice044 +case0039_slice045 +case0039_slice046 +case0039_slice047 +case0039_slice048 +case0039_slice049 +case0039_slice050 +case0039_slice051 +case0039_slice052 +case0039_slice053 +case0039_slice054 +case0039_slice055 +case0039_slice056 +case0039_slice057 +case0039_slice058 +case0039_slice059 +case0039_slice060 +case0039_slice061 +case0039_slice062 +case0039_slice063 +case0039_slice064 +case0039_slice065 +case0039_slice066 +case0039_slice067 +case0039_slice068 +case0039_slice069 +case0039_slice070 +case0039_slice071 +case0039_slice072 +case0039_slice073 +case0039_slice074 +case0039_slice075 +case0039_slice076 +case0039_slice077 +case0039_slice078 +case0039_slice079 +case0039_slice080 +case0039_slice081 +case0039_slice082 +case0039_slice083 +case0039_slice084 +case0039_slice085 +case0039_slice086 +case0039_slice087 +case0039_slice088 +case0039_slice089 +case0024_slice000 +case0024_slice001 +case0024_slice002 +case0024_slice003 +case0024_slice004 +case0024_slice005 +case0024_slice006 +case0024_slice007 +case0024_slice008 +case0024_slice009 +case0024_slice010 +case0024_slice011 +case0024_slice012 +case0024_slice013 +case0024_slice014 +case0024_slice015 +case0024_slice016 +case0024_slice017 +case0024_slice018 +case0024_slice019 +case0024_slice020 +case0024_slice021 +case0024_slice022 +case0024_slice023 +case0024_slice024 +case0024_slice025 +case0024_slice026 +case0024_slice027 +case0024_slice028 +case0024_slice029 +case0024_slice030 +case0024_slice031 +case0024_slice032 +case0024_slice033 +case0024_slice034 +case0024_slice035 +case0024_slice036 +case0024_slice037 +case0024_slice038 +case0024_slice039 +case0024_slice040 +case0024_slice041 +case0024_slice042 +case0024_slice043 +case0024_slice044 +case0024_slice045 +case0024_slice046 +case0024_slice047 +case0024_slice048 +case0024_slice049 +case0024_slice050 +case0024_slice051 +case0024_slice052 +case0024_slice053 +case0024_slice054 +case0024_slice055 
+case0024_slice056 +case0024_slice057 +case0024_slice058 +case0024_slice059 +case0024_slice060 +case0024_slice061 +case0024_slice062 +case0024_slice063 +case0024_slice064 +case0024_slice065 +case0024_slice066 +case0024_slice067 +case0024_slice068 +case0024_slice069 +case0024_slice070 +case0024_slice071 +case0024_slice072 +case0024_slice073 +case0024_slice074 +case0024_slice075 +case0024_slice076 +case0024_slice077 +case0024_slice078 +case0024_slice079 +case0024_slice080 +case0024_slice081 +case0024_slice082 +case0024_slice083 +case0024_slice084 +case0024_slice085 +case0024_slice086 +case0024_slice087 +case0024_slice088 +case0024_slice089 +case0024_slice090 +case0024_slice091 +case0024_slice092 +case0024_slice093 +case0024_slice094 +case0024_slice095 +case0024_slice096 +case0024_slice097 +case0024_slice098 +case0024_slice099 +case0024_slice100 +case0024_slice101 +case0024_slice102 +case0024_slice103 +case0024_slice104 +case0024_slice105 +case0024_slice106 +case0024_slice107 +case0024_slice108 +case0024_slice109 +case0024_slice110 +case0024_slice111 +case0024_slice112 +case0024_slice113 +case0024_slice114 +case0024_slice115 +case0024_slice116 +case0024_slice117 +case0024_slice118 +case0024_slice119 +case0024_slice120 +case0024_slice121 +case0024_slice122 +case0024_slice123 +case0034_slice000 +case0034_slice001 +case0034_slice002 +case0034_slice003 +case0034_slice004 +case0034_slice005 +case0034_slice006 +case0034_slice007 +case0034_slice008 +case0034_slice009 +case0034_slice010 +case0034_slice011 +case0034_slice012 +case0034_slice013 +case0034_slice014 +case0034_slice015 +case0034_slice016 +case0034_slice017 +case0034_slice018 +case0034_slice019 +case0034_slice020 +case0034_slice021 +case0034_slice022 +case0034_slice023 +case0034_slice024 +case0034_slice025 +case0034_slice026 +case0034_slice027 +case0034_slice028 +case0034_slice029 +case0034_slice030 +case0034_slice031 +case0034_slice032 +case0034_slice033 +case0034_slice034 +case0034_slice035 +case0034_slice036 +case0034_slice037 +case0034_slice038 +case0034_slice039 +case0034_slice040 +case0034_slice041 +case0034_slice042 +case0034_slice043 +case0034_slice044 +case0034_slice045 +case0034_slice046 +case0034_slice047 +case0034_slice048 +case0034_slice049 +case0034_slice050 +case0034_slice051 +case0034_slice052 +case0034_slice053 +case0034_slice054 +case0034_slice055 +case0034_slice056 +case0034_slice057 +case0034_slice058 +case0034_slice059 +case0034_slice060 +case0034_slice061 +case0034_slice062 +case0034_slice063 +case0034_slice064 +case0034_slice065 +case0034_slice066 +case0034_slice067 +case0034_slice068 +case0034_slice069 +case0034_slice070 +case0034_slice071 +case0034_slice072 +case0034_slice073 +case0034_slice074 +case0034_slice075 +case0034_slice076 +case0034_slice077 +case0034_slice078 +case0034_slice079 +case0034_slice080 +case0034_slice081 +case0034_slice082 +case0034_slice083 +case0034_slice084 +case0034_slice085 +case0034_slice086 +case0034_slice087 +case0034_slice088 +case0034_slice089 +case0034_slice090 +case0034_slice091 +case0034_slice092 +case0034_slice093 +case0034_slice094 +case0034_slice095 +case0034_slice096 +case0034_slice097 +case0033_slice000 +case0033_slice001 +case0033_slice002 +case0033_slice003 +case0033_slice004 +case0033_slice005 +case0033_slice006 +case0033_slice007 +case0033_slice008 +case0033_slice009 +case0033_slice010 +case0033_slice011 +case0033_slice012 +case0033_slice013 +case0033_slice014 +case0033_slice015 +case0033_slice016 +case0033_slice017 +case0033_slice018 +case0033_slice019 +case0033_slice020 
+case0033_slice021 +case0033_slice022 +case0033_slice023 +case0033_slice024 +case0033_slice025 +case0033_slice026 +case0033_slice027 +case0033_slice028 +case0033_slice029 +case0033_slice030 +case0033_slice031 +case0033_slice032 +case0033_slice033 +case0033_slice034 +case0033_slice035 +case0033_slice036 +case0033_slice037 +case0033_slice038 +case0033_slice039 +case0033_slice040 +case0033_slice041 +case0033_slice042 +case0033_slice043 +case0033_slice044 +case0033_slice045 +case0033_slice046 +case0033_slice047 +case0033_slice048 +case0033_slice049 +case0033_slice050 +case0033_slice051 +case0033_slice052 +case0033_slice053 +case0033_slice054 +case0033_slice055 +case0033_slice056 +case0033_slice057 +case0033_slice058 +case0033_slice059 +case0033_slice060 +case0033_slice061 +case0033_slice062 +case0033_slice063 +case0033_slice064 +case0033_slice065 +case0033_slice066 +case0033_slice067 +case0033_slice068 +case0033_slice069 +case0033_slice070 +case0033_slice071 +case0033_slice072 +case0033_slice073 +case0033_slice074 +case0033_slice075 +case0033_slice076 +case0033_slice077 +case0033_slice078 +case0033_slice079 +case0033_slice080 +case0033_slice081 +case0033_slice082 +case0033_slice083 +case0033_slice084 +case0033_slice085 +case0033_slice086 +case0033_slice087 +case0033_slice088 +case0033_slice089 +case0033_slice090 +case0033_slice091 +case0033_slice092 +case0033_slice093 +case0033_slice094 +case0033_slice095 +case0033_slice096 +case0033_slice097 +case0033_slice098 +case0033_slice099 +case0033_slice100 +case0033_slice101 +case0033_slice102 +case0033_slice103 +case0030_slice000 +case0030_slice001 +case0030_slice002 +case0030_slice003 +case0030_slice004 +case0030_slice005 +case0030_slice006 +case0030_slice007 +case0030_slice008 +case0030_slice009 +case0030_slice010 +case0030_slice011 +case0030_slice012 +case0030_slice013 +case0030_slice014 +case0030_slice015 +case0030_slice016 +case0030_slice017 +case0030_slice018 +case0030_slice019 +case0030_slice020 +case0030_slice021 +case0030_slice022 +case0030_slice023 +case0030_slice024 +case0030_slice025 +case0030_slice026 +case0030_slice027 +case0030_slice028 +case0030_slice029 +case0030_slice030 +case0030_slice031 +case0030_slice032 +case0030_slice033 +case0030_slice034 +case0030_slice035 +case0030_slice036 +case0030_slice037 +case0030_slice038 +case0030_slice039 +case0030_slice040 +case0030_slice041 +case0030_slice042 +case0030_slice043 +case0030_slice044 +case0030_slice045 +case0030_slice046 +case0030_slice047 +case0030_slice048 +case0030_slice049 +case0030_slice050 +case0030_slice051 +case0030_slice052 +case0030_slice053 +case0030_slice054 +case0030_slice055 +case0030_slice056 +case0030_slice057 +case0030_slice058 +case0030_slice059 +case0030_slice060 +case0030_slice061 +case0030_slice062 +case0030_slice063 +case0030_slice064 +case0030_slice065 +case0030_slice066 +case0030_slice067 +case0030_slice068 +case0030_slice069 +case0030_slice070 +case0030_slice071 +case0030_slice072 +case0030_slice073 +case0030_slice074 +case0030_slice075 +case0030_slice076 +case0030_slice077 +case0030_slice078 +case0030_slice079 +case0030_slice080 +case0030_slice081 +case0030_slice082 +case0030_slice083 +case0030_slice084 +case0030_slice085 +case0030_slice086 +case0030_slice087 +case0030_slice088 +case0030_slice089 +case0030_slice090 +case0030_slice091 +case0030_slice092 +case0030_slice093 +case0030_slice094 +case0030_slice095 +case0030_slice096 +case0030_slice097 +case0030_slice098 +case0030_slice099 +case0030_slice100 +case0030_slice101 +case0030_slice102 +case0030_slice103 
+case0030_slice104 +case0030_slice105 +case0030_slice106 +case0030_slice107 +case0030_slice108 +case0030_slice109 +case0030_slice110 +case0030_slice111 +case0030_slice112 +case0030_slice113 +case0030_slice114 +case0030_slice115 +case0030_slice116 +case0030_slice117 +case0030_slice118 +case0030_slice119 +case0030_slice120 +case0030_slice121 +case0030_slice122 +case0030_slice123 +case0030_slice124 +case0030_slice125 +case0030_slice126 +case0030_slice127 +case0030_slice128 +case0030_slice129 +case0030_slice130 +case0030_slice131 +case0030_slice132 +case0030_slice133 +case0030_slice134 +case0030_slice135 +case0030_slice136 +case0030_slice137 +case0030_slice138 +case0030_slice139 +case0030_slice140 +case0030_slice141 +case0030_slice142 +case0030_slice143 +case0030_slice144 +case0030_slice145 +case0030_slice146 +case0030_slice147 +case0030_slice148 +case0030_slice149 +case0030_slice150 +case0030_slice151 +case0030_slice152 +case0023_slice000 +case0023_slice001 +case0023_slice002 +case0023_slice003 +case0023_slice004 +case0023_slice005 +case0023_slice006 +case0023_slice007 +case0023_slice008 +case0023_slice009 +case0023_slice010 +case0023_slice011 +case0023_slice012 +case0023_slice013 +case0023_slice014 +case0023_slice015 +case0023_slice016 +case0023_slice017 +case0023_slice018 +case0023_slice019 +case0023_slice020 +case0023_slice021 +case0023_slice022 +case0023_slice023 +case0023_slice024 +case0023_slice025 +case0023_slice026 +case0023_slice027 +case0023_slice028 +case0023_slice029 +case0023_slice030 +case0023_slice031 +case0023_slice032 +case0023_slice033 +case0023_slice034 +case0023_slice035 +case0023_slice036 +case0023_slice037 +case0023_slice038 +case0023_slice039 +case0023_slice040 +case0023_slice041 +case0023_slice042 +case0023_slice043 +case0023_slice044 +case0023_slice045 +case0023_slice046 +case0023_slice047 +case0023_slice048 +case0023_slice049 +case0023_slice050 +case0023_slice051 +case0023_slice052 +case0023_slice053 +case0023_slice054 +case0023_slice055 +case0023_slice056 +case0023_slice057 +case0023_slice058 +case0023_slice059 +case0023_slice060 +case0023_slice061 +case0023_slice062 +case0023_slice063 +case0023_slice064 +case0023_slice065 +case0023_slice066 +case0023_slice067 +case0023_slice068 +case0023_slice069 +case0023_slice070 +case0023_slice071 +case0023_slice072 +case0023_slice073 +case0023_slice074 +case0023_slice075 +case0023_slice076 +case0023_slice077 +case0023_slice078 +case0023_slice079 +case0023_slice080 +case0023_slice081 +case0023_slice082 +case0023_slice083 +case0023_slice084 +case0023_slice085 +case0023_slice086 +case0023_slice087 +case0023_slice088 +case0023_slice089 +case0023_slice090 +case0023_slice091 +case0023_slice092 +case0023_slice093 +case0023_slice094 +case0023_slice095 +case0040_slice000 +case0040_slice001 +case0040_slice002 +case0040_slice003 +case0040_slice004 +case0040_slice005 +case0040_slice006 +case0040_slice007 +case0040_slice008 +case0040_slice009 +case0040_slice010 +case0040_slice011 +case0040_slice012 +case0040_slice013 +case0040_slice014 +case0040_slice015 +case0040_slice016 +case0040_slice017 +case0040_slice018 +case0040_slice019 +case0040_slice020 +case0040_slice021 +case0040_slice022 +case0040_slice023 +case0040_slice024 +case0040_slice025 +case0040_slice026 +case0040_slice027 +case0040_slice028 +case0040_slice029 +case0040_slice030 +case0040_slice031 +case0040_slice032 +case0040_slice033 +case0040_slice034 +case0040_slice035 +case0040_slice036 +case0040_slice037 +case0040_slice038 +case0040_slice039 +case0040_slice040 +case0040_slice041 
+case0040_slice042 +case0040_slice043 +case0040_slice044 +case0040_slice045 +case0040_slice046 +case0040_slice047 +case0040_slice048 +case0040_slice049 +case0040_slice050 +case0040_slice051 +case0040_slice052 +case0040_slice053 +case0040_slice054 +case0040_slice055 +case0040_slice056 +case0040_slice057 +case0040_slice058 +case0040_slice059 +case0040_slice060 +case0040_slice061 +case0040_slice062 +case0040_slice063 +case0040_slice064 +case0040_slice065 +case0040_slice066 +case0040_slice067 +case0040_slice068 +case0040_slice069 +case0040_slice070 +case0040_slice071 +case0040_slice072 +case0040_slice073 +case0040_slice074 +case0040_slice075 +case0040_slice076 +case0040_slice077 +case0040_slice078 +case0040_slice079 +case0040_slice080 +case0040_slice081 +case0040_slice082 +case0040_slice083 +case0040_slice084 +case0040_slice085 +case0040_slice086 +case0040_slice087 +case0040_slice088 +case0040_slice089 +case0040_slice090 +case0040_slice091 +case0040_slice092 +case0040_slice093 +case0040_slice094 +case0040_slice095 +case0040_slice096 +case0040_slice097 +case0040_slice098 +case0040_slice099 +case0040_slice100 +case0040_slice101 +case0040_slice102 +case0040_slice103 +case0040_slice104 +case0040_slice105 +case0040_slice106 +case0040_slice107 +case0040_slice108 +case0040_slice109 +case0040_slice110 +case0040_slice111 +case0040_slice112 +case0040_slice113 +case0040_slice114 +case0040_slice115 +case0040_slice116 +case0040_slice117 +case0040_slice118 +case0040_slice119 +case0040_slice120 +case0040_slice121 +case0040_slice122 +case0040_slice123 +case0040_slice124 +case0040_slice125 +case0040_slice126 +case0040_slice127 +case0040_slice128 +case0040_slice129 +case0040_slice130 +case0040_slice131 +case0040_slice132 +case0040_slice133 +case0040_slice134 +case0040_slice135 +case0040_slice136 +case0040_slice137 +case0040_slice138 +case0040_slice139 +case0040_slice140 +case0040_slice141 +case0040_slice142 +case0040_slice143 +case0040_slice144 +case0040_slice145 +case0040_slice146 +case0040_slice147 +case0040_slice148 +case0040_slice149 +case0040_slice150 +case0040_slice151 +case0040_slice152 +case0040_slice153 +case0040_slice154 +case0040_slice155 +case0040_slice156 +case0040_slice157 +case0040_slice158 +case0040_slice159 +case0040_slice160 +case0040_slice161 +case0040_slice162 +case0040_slice163 +case0040_slice164 +case0040_slice165 +case0040_slice166 +case0040_slice167 +case0040_slice168 +case0040_slice169 +case0040_slice170 +case0040_slice171 +case0040_slice172 +case0040_slice173 +case0040_slice174 +case0040_slice175 +case0040_slice176 +case0040_slice177 +case0040_slice178 +case0040_slice179 +case0040_slice180 +case0040_slice181 +case0040_slice182 +case0040_slice183 +case0040_slice184 +case0040_slice185 +case0040_slice186 +case0040_slice187 +case0040_slice188 +case0040_slice189 +case0040_slice190 +case0040_slice191 +case0040_slice192 +case0040_slice193 +case0040_slice194 +case0010_slice000 +case0010_slice001 +case0010_slice002 +case0010_slice003 +case0010_slice004 +case0010_slice005 +case0010_slice006 +case0010_slice007 +case0010_slice008 +case0010_slice009 +case0010_slice010 +case0010_slice011 +case0010_slice012 +case0010_slice013 +case0010_slice014 +case0010_slice015 +case0010_slice016 +case0010_slice017 +case0010_slice018 +case0010_slice019 +case0010_slice020 +case0010_slice021 +case0010_slice022 +case0010_slice023 +case0010_slice024 +case0010_slice025 +case0010_slice026 +case0010_slice027 +case0010_slice028 +case0010_slice029 +case0010_slice030 +case0010_slice031 +case0010_slice032 +case0010_slice033 
+case0010_slice034 +case0010_slice035 +case0010_slice036 +case0010_slice037 +case0010_slice038 +case0010_slice039 +case0010_slice040 +case0010_slice041 +case0010_slice042 +case0010_slice043 +case0010_slice044 +case0010_slice045 +case0010_slice046 +case0010_slice047 +case0010_slice048 +case0010_slice049 +case0010_slice050 +case0010_slice051 +case0010_slice052 +case0010_slice053 +case0010_slice054 +case0010_slice055 +case0010_slice056 +case0010_slice057 +case0010_slice058 +case0010_slice059 +case0010_slice060 +case0010_slice061 +case0010_slice062 +case0010_slice063 +case0010_slice064 +case0010_slice065 +case0010_slice066 +case0010_slice067 +case0010_slice068 +case0010_slice069 +case0010_slice070 +case0010_slice071 +case0010_slice072 +case0010_slice073 +case0010_slice074 +case0010_slice075 +case0010_slice076 +case0010_slice077 +case0010_slice078 +case0010_slice079 +case0010_slice080 +case0010_slice081 +case0010_slice082 +case0010_slice083 +case0010_slice084 +case0010_slice085 +case0010_slice086 +case0010_slice087 +case0010_slice088 +case0010_slice089 +case0010_slice090 +case0010_slice091 +case0010_slice092 +case0010_slice093 +case0010_slice094 +case0010_slice095 +case0010_slice096 +case0010_slice097 +case0010_slice098 +case0010_slice099 +case0010_slice100 +case0010_slice101 +case0010_slice102 +case0010_slice103 +case0010_slice104 +case0010_slice105 +case0010_slice106 +case0010_slice107 +case0010_slice108 +case0010_slice109 +case0010_slice110 +case0010_slice111 +case0010_slice112 +case0010_slice113 +case0010_slice114 +case0010_slice115 +case0010_slice116 +case0010_slice117 +case0010_slice118 +case0010_slice119 +case0010_slice120 +case0010_slice121 +case0010_slice122 +case0010_slice123 +case0010_slice124 +case0010_slice125 +case0010_slice126 +case0010_slice127 +case0010_slice128 +case0010_slice129 +case0010_slice130 +case0010_slice131 +case0010_slice132 +case0010_slice133 +case0010_slice134 +case0010_slice135 +case0010_slice136 +case0010_slice137 +case0010_slice138 +case0010_slice139 +case0010_slice140 +case0010_slice141 +case0010_slice142 +case0010_slice143 +case0010_slice144 +case0010_slice145 +case0010_slice146 +case0010_slice147 +case0021_slice000 +case0021_slice001 +case0021_slice002 +case0021_slice003 +case0021_slice004 +case0021_slice005 +case0021_slice006 +case0021_slice007 +case0021_slice008 +case0021_slice009 +case0021_slice010 +case0021_slice011 +case0021_slice012 +case0021_slice013 +case0021_slice014 +case0021_slice015 +case0021_slice016 +case0021_slice017 +case0021_slice018 +case0021_slice019 +case0021_slice020 +case0021_slice021 +case0021_slice022 +case0021_slice023 +case0021_slice024 +case0021_slice025 +case0021_slice026 +case0021_slice027 +case0021_slice028 +case0021_slice029 +case0021_slice030 +case0021_slice031 +case0021_slice032 +case0021_slice033 +case0021_slice034 +case0021_slice035 +case0021_slice036 +case0021_slice037 +case0021_slice038 +case0021_slice039 +case0021_slice040 +case0021_slice041 +case0021_slice042 +case0021_slice043 +case0021_slice044 +case0021_slice045 +case0021_slice046 +case0021_slice047 +case0021_slice048 +case0021_slice049 +case0021_slice050 +case0021_slice051 +case0021_slice052 +case0021_slice053 +case0021_slice054 +case0021_slice055 +case0021_slice056 +case0021_slice057 +case0021_slice058 +case0021_slice059 +case0021_slice060 +case0021_slice061 +case0021_slice062 +case0021_slice063 +case0021_slice064 +case0021_slice065 +case0021_slice066 +case0021_slice067 +case0021_slice068 +case0021_slice069 +case0021_slice070 +case0021_slice071 +case0021_slice072 
+case0021_slice073 +case0021_slice074 +case0021_slice075 +case0021_slice076 +case0021_slice077 +case0021_slice078 +case0021_slice079 +case0021_slice080 +case0021_slice081 +case0021_slice082 +case0021_slice083 +case0021_slice084 +case0021_slice085 +case0021_slice086 +case0021_slice087 +case0021_slice088 +case0021_slice089 +case0021_slice090 +case0021_slice091 +case0021_slice092 +case0021_slice093 +case0021_slice094 +case0021_slice095 +case0021_slice096 +case0021_slice097 +case0021_slice098 +case0021_slice099 +case0021_slice100 +case0021_slice101 +case0021_slice102 +case0021_slice103 +case0021_slice104 +case0021_slice105 +case0021_slice106 +case0021_slice107 +case0021_slice108 +case0021_slice109 +case0021_slice110 +case0021_slice111 +case0021_slice112 +case0021_slice113 +case0021_slice114 +case0021_slice115 +case0021_slice116 +case0021_slice117 +case0021_slice118 +case0021_slice119 +case0021_slice120 +case0021_slice121 +case0021_slice122 +case0021_slice123 +case0021_slice124 +case0021_slice125 +case0021_slice126 +case0021_slice127 +case0021_slice128 +case0021_slice129 +case0021_slice130 +case0021_slice131 +case0021_slice132 +case0021_slice133 +case0021_slice134 +case0021_slice135 +case0021_slice136 +case0021_slice137 +case0021_slice138 +case0021_slice139 +case0021_slice140 +case0021_slice141 +case0021_slice142 +case0006_slice000 +case0006_slice001 +case0006_slice002 +case0006_slice003 +case0006_slice004 +case0006_slice005 +case0006_slice006 +case0006_slice007 +case0006_slice008 +case0006_slice009 +case0006_slice010 +case0006_slice011 +case0006_slice012 +case0006_slice013 +case0006_slice014 +case0006_slice015 +case0006_slice016 +case0006_slice017 +case0006_slice018 +case0006_slice019 +case0006_slice020 +case0006_slice021 +case0006_slice022 +case0006_slice023 +case0006_slice024 +case0006_slice025 +case0006_slice026 +case0006_slice027 +case0006_slice028 +case0006_slice029 +case0006_slice030 +case0006_slice031 +case0006_slice032 +case0006_slice033 +case0006_slice034 +case0006_slice035 +case0006_slice036 +case0006_slice037 +case0006_slice038 +case0006_slice039 +case0006_slice040 +case0006_slice041 +case0006_slice042 +case0006_slice043 +case0006_slice044 +case0006_slice045 +case0006_slice046 +case0006_slice047 +case0006_slice048 +case0006_slice049 +case0006_slice050 +case0006_slice051 +case0006_slice052 +case0006_slice053 +case0006_slice054 +case0006_slice055 +case0006_slice056 +case0006_slice057 +case0006_slice058 +case0006_slice059 +case0006_slice060 +case0006_slice061 +case0006_slice062 +case0006_slice063 +case0006_slice064 +case0006_slice065 +case0006_slice066 +case0006_slice067 +case0006_slice068 +case0006_slice069 +case0006_slice070 +case0006_slice071 +case0006_slice072 +case0006_slice073 +case0006_slice074 +case0006_slice075 +case0006_slice076 +case0006_slice077 +case0006_slice078 +case0006_slice079 +case0006_slice080 +case0006_slice081 +case0006_slice082 +case0006_slice083 +case0006_slice084 +case0006_slice085 +case0006_slice086 +case0006_slice087 +case0006_slice088 +case0006_slice089 +case0006_slice090 +case0006_slice091 +case0006_slice092 +case0006_slice093 +case0006_slice094 +case0006_slice095 +case0006_slice096 +case0006_slice097 +case0006_slice098 +case0006_slice099 +case0006_slice100 +case0006_slice101 +case0006_slice102 +case0006_slice103 +case0006_slice104 +case0006_slice105 +case0006_slice106 +case0006_slice107 +case0006_slice108 +case0006_slice109 +case0006_slice110 +case0006_slice111 +case0006_slice112 +case0006_slice113 +case0006_slice114 +case0006_slice115 +case0006_slice116 
+case0006_slice117 +case0006_slice118 +case0006_slice119 +case0006_slice120 +case0006_slice121 +case0006_slice122 +case0006_slice123 +case0006_slice124 +case0006_slice125 +case0006_slice126 +case0006_slice127 +case0006_slice128 +case0006_slice129 +case0006_slice130 +case0027_slice000 +case0027_slice001 +case0027_slice002 +case0027_slice003 +case0027_slice004 +case0027_slice005 +case0027_slice006 +case0027_slice007 +case0027_slice008 +case0027_slice009 +case0027_slice010 +case0027_slice011 +case0027_slice012 +case0027_slice013 +case0027_slice014 +case0027_slice015 +case0027_slice016 +case0027_slice017 +case0027_slice018 +case0027_slice019 +case0027_slice020 +case0027_slice021 +case0027_slice022 +case0027_slice023 +case0027_slice024 +case0027_slice025 +case0027_slice026 +case0027_slice027 +case0027_slice028 +case0027_slice029 +case0027_slice030 +case0027_slice031 +case0027_slice032 +case0027_slice033 +case0027_slice034 +case0027_slice035 +case0027_slice036 +case0027_slice037 +case0027_slice038 +case0027_slice039 +case0027_slice040 +case0027_slice041 +case0027_slice042 +case0027_slice043 +case0027_slice044 +case0027_slice045 +case0027_slice046 +case0027_slice047 +case0027_slice048 +case0027_slice049 +case0027_slice050 +case0027_slice051 +case0027_slice052 +case0027_slice053 +case0027_slice054 +case0027_slice055 +case0027_slice056 +case0027_slice057 +case0027_slice058 +case0027_slice059 +case0027_slice060 +case0027_slice061 +case0027_slice062 +case0027_slice063 +case0027_slice064 +case0027_slice065 +case0027_slice066 +case0027_slice067 +case0027_slice068 +case0027_slice069 +case0027_slice070 +case0027_slice071 +case0027_slice072 +case0027_slice073 +case0027_slice074 +case0027_slice075 +case0027_slice076 +case0027_slice077 +case0027_slice078 +case0027_slice079 +case0027_slice080 +case0027_slice081 +case0027_slice082 +case0027_slice083 +case0027_slice084 +case0027_slice085 +case0027_slice086 +case0027_slice087 +case0028_slice000 +case0028_slice001 +case0028_slice002 +case0028_slice003 +case0028_slice004 +case0028_slice005 +case0028_slice006 +case0028_slice007 +case0028_slice008 +case0028_slice009 +case0028_slice010 +case0028_slice011 +case0028_slice012 +case0028_slice013 +case0028_slice014 +case0028_slice015 +case0028_slice016 +case0028_slice017 +case0028_slice018 +case0028_slice019 +case0028_slice020 +case0028_slice021 +case0028_slice022 +case0028_slice023 +case0028_slice024 +case0028_slice025 +case0028_slice026 +case0028_slice027 +case0028_slice028 +case0028_slice029 +case0028_slice030 +case0028_slice031 +case0028_slice032 +case0028_slice033 +case0028_slice034 +case0028_slice035 +case0028_slice036 +case0028_slice037 +case0028_slice038 +case0028_slice039 +case0028_slice040 +case0028_slice041 +case0028_slice042 +case0028_slice043 +case0028_slice044 +case0028_slice045 +case0028_slice046 +case0028_slice047 +case0028_slice048 +case0028_slice049 +case0028_slice050 +case0028_slice051 +case0028_slice052 +case0028_slice053 +case0028_slice054 +case0028_slice055 +case0028_slice056 +case0028_slice057 +case0028_slice058 +case0028_slice059 +case0028_slice060 +case0028_slice061 +case0028_slice062 +case0028_slice063 +case0028_slice064 +case0028_slice065 +case0028_slice066 +case0028_slice067 +case0028_slice068 +case0028_slice069 +case0028_slice070 +case0028_slice071 +case0028_slice072 +case0028_slice073 +case0028_slice074 +case0028_slice075 +case0028_slice076 +case0028_slice077 +case0028_slice078 +case0028_slice079 +case0028_slice080 +case0028_slice081 +case0028_slice082 +case0028_slice083 +case0028_slice084 
+case0028_slice085 +case0028_slice086 +case0028_slice087 +case0028_slice088 +case0037_slice000 +case0037_slice001 +case0037_slice002 +case0037_slice003 +case0037_slice004 +case0037_slice005 +case0037_slice006 +case0037_slice007 +case0037_slice008 +case0037_slice009 +case0037_slice010 +case0037_slice011 +case0037_slice012 +case0037_slice013 +case0037_slice014 +case0037_slice015 +case0037_slice016 +case0037_slice017 +case0037_slice018 +case0037_slice019 +case0037_slice020 +case0037_slice021 +case0037_slice022 +case0037_slice023 +case0037_slice024 +case0037_slice025 +case0037_slice026 +case0037_slice027 +case0037_slice028 +case0037_slice029 +case0037_slice030 +case0037_slice031 +case0037_slice032 +case0037_slice033 +case0037_slice034 +case0037_slice035 +case0037_slice036 +case0037_slice037 +case0037_slice038 +case0037_slice039 +case0037_slice040 +case0037_slice041 +case0037_slice042 +case0037_slice043 +case0037_slice044 +case0037_slice045 +case0037_slice046 +case0037_slice047 +case0037_slice048 +case0037_slice049 +case0037_slice050 +case0037_slice051 +case0037_slice052 +case0037_slice053 +case0037_slice054 +case0037_slice055 +case0037_slice056 +case0037_slice057 +case0037_slice058 +case0037_slice059 +case0037_slice060 +case0037_slice061 +case0037_slice062 +case0037_slice063 +case0037_slice064 +case0037_slice065 +case0037_slice066 +case0037_slice067 +case0037_slice068 +case0037_slice069 +case0037_slice070 +case0037_slice071 +case0037_slice072 +case0037_slice073 +case0037_slice074 +case0037_slice075 +case0037_slice076 +case0037_slice077 +case0037_slice078 +case0037_slice079 +case0037_slice080 +case0037_slice081 +case0037_slice082 +case0037_slice083 +case0037_slice084 +case0037_slice085 +case0037_slice086 +case0037_slice087 +case0037_slice088 +case0037_slice089 +case0037_slice090 +case0037_slice091 +case0037_slice092 +case0037_slice093 +case0037_slice094 +case0037_slice095 +case0037_slice096 +case0037_slice097 +case0037_slice098 diff --git a/PuzzleTuning/SSL_structures/Swin_Unet_main/networks/swin_transformer_unet_skip_expand_decoder_sys.py b/PuzzleTuning/SSL_structures/Swin_Unet_main/networks/swin_transformer_unet_skip_expand_decoder_sys.py new file mode 100644 index 0000000000000000000000000000000000000000..dec2aeff5ea846ba112c70ebc8228171384f3c40 --- /dev/null +++ b/PuzzleTuning/SSL_structures/Swin_Unet_main/networks/swin_transformer_unet_skip_expand_decoder_sys.py @@ -0,0 +1,753 @@ +import torch +import torch.nn as nn +import torch.utils.checkpoint as checkpoint +from einops import rearrange +from timm.models.layers import DropPath, to_2tuple, trunc_normal_ + + +class Mlp(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +def window_partition(x, window_size): + """ + Args: + x: (B, H, W, C) + window_size (int): window size + + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + return windows + + +def 
window_reverse(windows, window_size, H, W): + """ + Args: + windows: (num_windows*B, window_size, window_size, C) + window_size (int): Window size + H (int): Height of image + W (int): Width of image + + Returns: + x: (B, H, W, C) + """ + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + +class WindowAttention(nn.Module): + r""" Window based multi-head self attention (W-MSA) module with relative position bias. + It supports both of shifted and non-shifted window. + + Args: + dim (int): Number of input channels. + window_size (tuple[int]): The height and width of the window. + num_heads (int): Number of attention heads. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set + attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 + proj_drop (float, optional): Dropout ratio of output. Default: 0.0 + """ + + def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.): + + super().__init__() + self.dim = dim + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim ** -0.5 + + # define a parameter table of relative position bias + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(self.window_size[0]) + coords_w = torch.arange(self.window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + self.register_buffer("relative_position_index", relative_position_index) + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + trunc_normal_(self.relative_position_bias_table, std=.02) + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x, mask=None): + """ + Args: + x: input features with shape of (num_windows*B, N, C) + mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None + """ + B_, N, C = x.shape + qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + 
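+        # The branch below applies the shifted-window (SW-MSA) attention mask:
+        # after the cyclic shift, a single window can contain tokens from
+        # different image regions, and masked pairs get -100 added to their
+        # logits so they vanish after the softmax. nW is the number of windows
+        # per image; the view/add/view sequence broadcasts the per-window mask
+        # over the batch and head dimensions.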
if mask is not None:
+            nW = mask.shape[0]
+            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
+            attn = attn.view(-1, self.num_heads, N, N)
+            attn = self.softmax(attn)
+        else:
+            attn = self.softmax(attn)
+
+        attn = self.attn_drop(attn)
+
+        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
+        x = self.proj(x)
+        x = self.proj_drop(x)
+        return x
+
+    def extra_repr(self) -> str:
+        return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
+
+    def flops(self, N):
+        # calculate flops for 1 window with token length of N
+        flops = 0
+        # qkv = self.qkv(x)
+        flops += N * self.dim * 3 * self.dim
+        # attn = (q @ k.transpose(-2, -1))
+        flops += self.num_heads * N * (self.dim // self.num_heads) * N
+        # x = (attn @ v)
+        flops += self.num_heads * N * N * (self.dim // self.num_heads)
+        # x = self.proj(x)
+        flops += N * self.dim * self.dim
+        return flops
+
+
+class SwinTransformerBlock(nn.Module):
+    r""" Swin Transformer Block.
+
+    Args:
+        dim (int): Number of input channels.
+        input_resolution (tuple[int]): Input resolution.
+        num_heads (int): Number of attention heads.
+        window_size (int): Window size.
+        shift_size (int): Shift size for SW-MSA.
+        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
+        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
+        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
+        drop (float, optional): Dropout rate. Default: 0.0
+        attn_drop (float, optional): Attention dropout rate. Default: 0.0
+        drop_path (float, optional): Stochastic depth rate. Default: 0.0
+        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
+        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
+    """
+
+    def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
+                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
+                 act_layer=nn.GELU, norm_layer=nn.LayerNorm):
+        super().__init__()
+        self.dim = dim
+        self.input_resolution = input_resolution
+        self.num_heads = num_heads
+        self.window_size = window_size
+        self.shift_size = shift_size
+        self.mlp_ratio = mlp_ratio
+        if min(self.input_resolution) <= self.window_size:
+            # if window size is larger than input resolution, we don't partition windows
+            self.shift_size = 0
+            self.window_size = min(self.input_resolution)
+        assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
+
+        self.norm1 = norm_layer(dim)
+        self.attn = WindowAttention(
+            dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
+            qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
+
+        self.drop_path = DropPath(drop_path) if drop_path > 0.
else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + if self.shift_size > 0: + # calculate attention mask for SW-MSA + H, W = self.input_resolution + img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 + h_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + w_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 + mask_windows = mask_windows.view(-1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) + else: + attn_mask = None + + self.register_buffer("attn_mask", attn_mask) + + def forward(self, x): + H, W = self.input_resolution + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + + shortcut = x + x = self.norm1(x) + x = x.view(B, H, W, C) + + # cyclic shift + if self.shift_size > 0: + shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) + else: + shifted_x = x + + # partition windows + x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C + x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C + + # W-MSA/SW-MSA + attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) + shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C + + # reverse cyclic shift + if self.shift_size > 0: + x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) + else: + x = shifted_x + x = x.view(B, H * W, C) + + # FFN + x = shortcut + self.drop_path(x) + x = x + self.drop_path(self.mlp(self.norm2(x))) + + return x + + def extra_repr(self) -> str: + return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \ + f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}" + + def flops(self): + flops = 0 + H, W = self.input_resolution + # norm1 + flops += self.dim * H * W + # W-MSA/SW-MSA + nW = H * W / self.window_size / self.window_size + flops += nW * self.attn.flops(self.window_size * self.window_size) + # mlp + flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio + # norm2 + flops += self.dim * H * W + return flops + + +class PatchMerging(nn.Module): + r""" Patch Merging Layer. + + Args: + input_resolution (tuple[int]): Resolution of input feature. + dim (int): Number of input channels. + norm_layer (nn.Module, optional): Normalization layer. 
Default: nn.LayerNorm + """ + + def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm): + super().__init__() + self.input_resolution = input_resolution + self.dim = dim + self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) + self.norm = norm_layer(4 * dim) + + def forward(self, x): + """ + x: B, H*W, C + """ + H, W = self.input_resolution + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even." + + x = x.view(B, H, W, C) + + x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C + x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C + x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C + x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C + x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C + x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C + + x = self.norm(x) + x = self.reduction(x) + + return x + + def extra_repr(self) -> str: + return f"input_resolution={self.input_resolution}, dim={self.dim}" + + def flops(self): + H, W = self.input_resolution + flops = H * W * self.dim + flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim + return flops + +class PatchExpand(nn.Module): + def __init__(self, input_resolution, dim, dim_scale=2, norm_layer=nn.LayerNorm): + super().__init__() + self.input_resolution = input_resolution + self.dim = dim + self.expand = nn.Linear(dim, 2*dim, bias=False) if dim_scale==2 else nn.Identity() + self.norm = norm_layer(dim // dim_scale) + + def forward(self, x): + """ + x: B, H*W, C + """ + H, W = self.input_resolution + x = self.expand(x) + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + + x = x.view(B, H, W, C) + x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c', p1=2, p2=2, c=C//4) + x = x.view(B,-1,C//4) + x= self.norm(x) + + return x + +class FinalPatchExpand_X4(nn.Module): + def __init__(self, input_resolution, dim, dim_scale=4, norm_layer=nn.LayerNorm): + super().__init__() + self.input_resolution = input_resolution + self.dim = dim + self.dim_scale = dim_scale + self.expand = nn.Linear(dim, 16*dim, bias=False) + self.output_dim = dim + self.norm = norm_layer(self.output_dim) + + def forward(self, x): + """ + x: B, H*W, C + """ + H, W = self.input_resolution + x = self.expand(x) + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + + x = x.view(B, H, W, C) + x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c', p1=self.dim_scale, p2=self.dim_scale, c=C//(self.dim_scale**2)) + x = x.view(B,-1,self.output_dim) + x= self.norm(x) + + return x + +class BasicLayer(nn.Module): + """ A basic Swin Transformer layer for one stage. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resolution. + depth (int): Number of blocks. + num_heads (int): Number of attention heads. + window_size (int): Local window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. 
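+
+    Note (data-flow sketch, as implied by the implementation below): blocks
+        alternate plain W-MSA and shifted SW-MSA (shift_size is 0 for even
+        block indices and window_size // 2 for odd ones), and the optional
+        downsample (PatchMerging) at the end of the stage halves H and W
+        while doubling the channels, so a [B, H*W, C] input leaves as
+        [B, H/2 * W/2, 2C].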
+ """ + + def __init__(self, dim, input_resolution, depth, num_heads, window_size, + mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False): + + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.ModuleList([ + SwinTransformerBlock(dim=dim, input_resolution=input_resolution, + num_heads=num_heads, window_size=window_size, + shift_size=0 if (i % 2 == 0) else window_size // 2, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop, attn_drop=attn_drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, + norm_layer=norm_layer) + for i in range(depth)]) + + # patch merging layer + if downsample is not None: + self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer) + else: + self.downsample = None + + def forward(self, x): + for blk in self.blocks: + if self.use_checkpoint: + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x) + if self.downsample is not None: + x = self.downsample(x) + return x + + def extra_repr(self) -> str: + return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}" + + def flops(self): + flops = 0 + for blk in self.blocks: + flops += blk.flops() + if self.downsample is not None: + flops += self.downsample.flops() + return flops + +class BasicLayer_up(nn.Module): + """ A basic Swin Transformer layer for one stage. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resolution. + depth (int): Number of blocks. + num_heads (int): Number of attention heads. + window_size (int): Local window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. 
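+
+    Note: the `downsample` entry above appears to be carried over from the
+        encoder's BasicLayer docstring; this decoder stage actually takes an
+        `upsample` argument and, when set, ends with a PatchExpand that
+        doubles H and W while halving the channels.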
+ """ + + def __init__(self, dim, input_resolution, depth, num_heads, window_size, + mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., norm_layer=nn.LayerNorm, upsample=None, use_checkpoint=False): + + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.ModuleList([ + SwinTransformerBlock(dim=dim, input_resolution=input_resolution, + num_heads=num_heads, window_size=window_size, + shift_size=0 if (i % 2 == 0) else window_size // 2, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop, attn_drop=attn_drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, + norm_layer=norm_layer) + for i in range(depth)]) + + # patch merging layer + if upsample is not None: + self.upsample = PatchExpand(input_resolution, dim=dim, dim_scale=2, norm_layer=norm_layer) + else: + self.upsample = None + + def forward(self, x): + for blk in self.blocks: + if self.use_checkpoint: + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x) + if self.upsample is not None: + x = self.upsample(x) + return x + +class PatchEmbed(nn.Module): + r""" Image to Patch Embedding + + Args: + img_size (int): Image size. Default: 224. + patch_size (int): Patch token size. Default: 4. + in_chans (int): Number of input image channels. Default: 3. + embed_dim (int): Number of linear projection output channels. Default: 96. + norm_layer (nn.Module, optional): Normalization layer. Default: None + """ + + def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]] + self.img_size = img_size + self.patch_size = patch_size + self.patches_resolution = patches_resolution + self.num_patches = patches_resolution[0] * patches_resolution[1] + + self.in_chans = in_chans + self.embed_dim = embed_dim + + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + if norm_layer is not None: + self.norm = norm_layer(embed_dim) + else: + self.norm = None + + def forward(self, x): + B, C, H, W = x.shape + # FIXME look at relaxing size constraints + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." + x = self.proj(x).flatten(2).transpose(1, 2) # B Ph*Pw C + if self.norm is not None: + x = self.norm(x) + return x + + def flops(self): + Ho, Wo = self.patches_resolution + flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1]) + if self.norm is not None: + flops += Ho * Wo * self.embed_dim + return flops + + +class SwinTransformerSys(nn.Module): + r""" Swin Transformer + A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - + https://arxiv.org/pdf/2103.14030 + + Args: + img_size (int | tuple(int)): Input image size. Default 224 + patch_size (int | tuple(int)): Patch size. Default: 4 + in_chans (int): Number of input image channels. Default: 3 + num_classes (int): Number of classes for classification head. Default: 1000 + embed_dim (int): Patch embedding dimension. Default: 96 + depths (tuple(int)): Depth of each Swin Transformer layer. + num_heads (tuple(int)): Number of attention heads in different layers. + window_size (int): Window size. 
Default: 7 + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4 + qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None + drop_rate (float): Dropout rate. Default: 0 + attn_drop_rate (float): Attention dropout rate. Default: 0 + drop_path_rate (float): Stochastic depth rate. Default: 0.1 + norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. + ape (bool): If True, add absolute position embedding to the patch embedding. Default: False + patch_norm (bool): If True, add normalization after patch embedding. Default: True + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False + """ + + def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=2, + embed_dim=96, depths=[2, 2, 2, 2], depths_decoder=[1, 2, 2, 2], num_heads=[3, 6, 12, 24], + window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, + norm_layer=nn.LayerNorm, ape=False, patch_norm=True, + use_checkpoint=False, final_upsample="expand_first", **kwargs): + super().__init__() + + print("SwinTransformerSys expand initial----depths:{};depths_decoder:{};drop_path_rate:{};num_classes:{}".format(depths, + depths_decoder,drop_path_rate,num_classes)) + + self.num_classes = num_classes + self.num_layers = len(depths) + self.embed_dim = embed_dim + self.ape = ape + self.patch_norm = patch_norm + self.num_features = int(embed_dim * 2 ** (self.num_layers - 1)) + self.num_features_up = int(embed_dim * 2) + self.mlp_ratio = mlp_ratio + self.final_upsample = final_upsample + + # split image into non-overlapping patches + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, + norm_layer=norm_layer if self.patch_norm else None) + num_patches = self.patch_embed.num_patches + patches_resolution = self.patch_embed.patches_resolution + self.patches_resolution = patches_resolution + + # absolute position embedding + if self.ape: + self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) + trunc_normal_(self.absolute_pos_embed, std=.02) + + self.pos_drop = nn.Dropout(p=drop_rate) + + # stochastic depth + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule + + # build encoder and bottleneck layers + self.layers = nn.ModuleList() + for i_layer in range(self.num_layers): + layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer), + input_resolution=(patches_resolution[0] // (2 ** i_layer), + patches_resolution[1] // (2 ** i_layer)), + depth=depths[i_layer], + num_heads=num_heads[i_layer], + window_size=window_size, + mlp_ratio=self.mlp_ratio, + qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, + drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], + norm_layer=norm_layer, + downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, + use_checkpoint=use_checkpoint) + self.layers.append(layer) + + # build decoder layers + self.layers_up = nn.ModuleList() + self.concat_back_dim = nn.ModuleList() + for i_layer in range(self.num_layers): + concat_linear = nn.Linear(2*int(embed_dim*2**(self.num_layers-1-i_layer)), + int(embed_dim*2**(self.num_layers-1-i_layer))) if i_layer > 0 else nn.Identity() + if i_layer ==0 : + layer_up = PatchExpand(input_resolution=(patches_resolution[0] // (2 ** (self.num_layers-1-i_layer)), + 
patches_resolution[1] // (2 ** (self.num_layers-1-i_layer))), dim=int(embed_dim * 2 ** (self.num_layers-1-i_layer)), dim_scale=2, norm_layer=norm_layer)
+            else:
+                layer_up = BasicLayer_up(dim=int(embed_dim * 2 ** (self.num_layers-1-i_layer)),
+                                         input_resolution=(patches_resolution[0] // (2 ** (self.num_layers-1-i_layer)),
+                                                           patches_resolution[1] // (2 ** (self.num_layers-1-i_layer))),
+                                         depth=depths[(self.num_layers-1-i_layer)],
+                                         num_heads=num_heads[(self.num_layers-1-i_layer)],
+                                         window_size=window_size,
+                                         mlp_ratio=self.mlp_ratio,
+                                         qkv_bias=qkv_bias, qk_scale=qk_scale,
+                                         drop=drop_rate, attn_drop=attn_drop_rate,
+                                         drop_path=dpr[sum(depths[:(self.num_layers-1-i_layer)]):sum(depths[:(self.num_layers-1-i_layer) + 1])],
+                                         norm_layer=norm_layer,
+                                         upsample=PatchExpand if (i_layer < self.num_layers - 1) else None,
+                                         use_checkpoint=use_checkpoint)
+            self.layers_up.append(layer_up)
+            self.concat_back_dim.append(concat_linear)
+
+        self.norm = norm_layer(self.num_features)
+        self.norm_up = norm_layer(self.embed_dim)
+
+        if self.final_upsample == "expand_first":
+            print("---final upsample expand_first---")
+            self.up = FinalPatchExpand_X4(input_resolution=(img_size // patch_size, img_size // patch_size),
+                                          dim_scale=4, dim=embed_dim)
+            self.output = nn.Conv2d(in_channels=embed_dim, out_channels=self.num_classes, kernel_size=1, bias=False)
+
+        self.apply(self._init_weights)
+
+    def _init_weights(self, m):
+        if isinstance(m, nn.Linear):
+            trunc_normal_(m.weight, std=.02)
+            if isinstance(m, nn.Linear) and m.bias is not None:
+                nn.init.constant_(m.bias, 0)
+        elif isinstance(m, nn.LayerNorm):
+            nn.init.constant_(m.bias, 0)
+            nn.init.constant_(m.weight, 1.0)
+
+    @torch.jit.ignore
+    def no_weight_decay(self):
+        return {'absolute_pos_embed'}
+
+    @torch.jit.ignore
+    def no_weight_decay_keywords(self):
+        return {'relative_position_bias_table'}
+
+    # Encoder and bottleneck
+    def forward_features(self, x):
+        x = self.patch_embed(x)
+        if self.ape:
+            x = x + self.absolute_pos_embed
+        x = self.pos_drop(x)
+        x_downsample = []
+
+        for layer in self.layers:
+            x_downsample.append(x)  # keep the pre-downsample features for the skip connections
+            x = layer(x)
+
+        x = self.norm(x)  # B L C
+
+        return x, x_downsample
+
+    # Decoder and skip connection
+    def forward_up_features(self, x, x_downsample):
+        for inx, layer_up in enumerate(self.layers_up):
+            if inx == 0:
+                x = layer_up(x)
+            else:
+                # skip connection: concatenate the matching encoder features,
+                # then project the doubled channels back down
+                x = torch.cat([x, x_downsample[3 - inx]], -1)
+                x = self.concat_back_dim[inx](x)
+                x = layer_up(x)
+
+        x = self.norm_up(x)  # B L C
+
+        return x
+
+    def up_x4(self, x):
+        H, W = self.patches_resolution
+        B, L, C = x.shape
+        assert L == H * W, "input features have wrong size"
+
+        if self.final_upsample == "expand_first":
+            x = self.up(x)
+            x = x.view(B, 4 * H, 4 * W, -1)
+            x = x.permute(0, 3, 1, 2)  # B,C,H,W
+            x = self.output(x)
+
+        return x
+
+    def forward(self, x):
+        x, x_downsample = self.forward_features(x)
+        x = self.forward_up_features(x, x_downsample)  # B, L, C
+        x = self.up_x4(x)
+
+        return x
+
+    def flops(self):
+        flops = 0
+        flops += self.patch_embed.flops()
+        for i, layer in enumerate(self.layers):
+            flops += layer.flops()
+        flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
+        flops += self.num_features * self.num_classes
+        return flops
diff --git a/PuzzleTuning/SSL_structures/Swin_Unet_main/networks/vision_transformer.py b/PuzzleTuning/SSL_structures/Swin_Unet_main/networks/vision_transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..889f8c91aaf509f10b5ead4c310e3e2a7a0d11ae
--- /dev/null
+++ b/PuzzleTuning/SSL_structures/Swin_Unet_main/networks/vision_transformer.py
@@ -0,0 +1,89 @@
+# coding=utf-8
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import copy
+import logging
+import math
+
+from os.path import join as pjoin
+
+import torch
+import torch.nn as nn
+import numpy as np
+
+from torch.nn import CrossEntropyLoss, Dropout, Softmax, Linear, Conv2d, LayerNorm
+from torch.nn.modules.utils import _pair
+from scipy import ndimage
+from .swin_transformer_unet_skip_expand_decoder_sys import SwinTransformerSys
+
+logger = logging.getLogger(__name__)
+
+class SwinUnet(nn.Module):
+    def __init__(self, img_size=224, num_classes=2, zero_head=False, vis=False, patch_size=16):
+        super(SwinUnet, self).__init__()
+        self.num_classes = num_classes
+        self.zero_head = zero_head
+
+        # note: the Swin backbone below is hard-wired to patch_size=4; the
+        # `patch_size` constructor argument is accepted but not forwarded
+        self.swin_unet = SwinTransformerSys(img_size=img_size,
+                                            patch_size=4,
+                                            in_chans=3,
+                                            num_classes=num_classes,
+                                            embed_dim=96,
+                                            depths=[2, 2, 2, 2],
+                                            num_heads=[3, 6, 12, 24],
+                                            window_size=7,
+                                            mlp_ratio=4,
+                                            qkv_bias=True,
+                                            qk_scale=None,
+                                            drop_rate=0,
+                                            drop_path_rate=0.1,
+                                            ape=False,
+                                            patch_norm=True,
+                                            use_checkpoint=False)
+
+    def forward(self, x):
+        if x.size()[1] == 1:
+            x = x.repeat(1, 3, 1, 1)
+        logits = self.swin_unet(x)
+        return logits
+
+    # def load_from(self, config):
+    #     pretrained_path = config.MODEL.PRETRAIN_CKPT
+    #     if pretrained_path is not None:
+    #         print("pretrained_path:{}".format(pretrained_path))
+    #         device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+    #         pretrained_dict = torch.load(pretrained_path, map_location=device)
+    #         if "model" not in pretrained_dict:
+    #             print("---start load pretrained model by splitting---")
+    #             pretrained_dict = {k[17:]: v for k, v in pretrained_dict.items()}
+    #             for k in list(pretrained_dict.keys()):
+    #                 if "output" in k:
+    #                     print("delete key:{}".format(k))
+    #                     del pretrained_dict[k]
+    #             msg = self.swin_unet.load_state_dict(pretrained_dict, strict=False)
+    #             # print(msg)
+    #             return
+    #         pretrained_dict = pretrained_dict['model']
+    #         print("---start load pretrained model of swin encoder---")
+    #
+    #         model_dict = self.swin_unet.state_dict()
+    #         full_dict = copy.deepcopy(pretrained_dict)
+    #         for k, v in pretrained_dict.items():
+    #             if "layers." in k:
+    #                 current_layer_num = 3 - int(k[7:8])
+    #                 current_k = "layers_up." + str(current_layer_num) + k[8:]
+    #                 full_dict.update({current_k: v})
+    #         for k in list(full_dict.keys()):
+    #             if k in model_dict:
+    #                 if full_dict[k].shape != model_dict[k].shape:
+    #                     print("delete:{};shape pretrain:{};shape model:{}".format(k, v.shape, model_dict[k].shape))
+    #                     del full_dict[k]
+    #
+    #         msg = self.swin_unet.load_state_dict(full_dict, strict=False)
+    #         # print(msg)
+    #     else:
+    #         print("none pretrain")
+    #
\ No newline at end of file
diff --git a/PuzzleTuning/SSL_structures/Swin_Unet_main/requirements.txt b/PuzzleTuning/SSL_structures/Swin_Unet_main/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4abfe422e0bd10ed594596292121fb6eac4d4581
--- /dev/null
+++ b/PuzzleTuning/SSL_structures/Swin_Unet_main/requirements.txt
@@ -0,0 +1,11 @@
+torch==1.4.0
+torchvision==0.5.0
+numpy
+tqdm
+tensorboard
+tensorboardX
+ml-collections
+medpy
+SimpleITK
+scipy
+h5py
diff --git a/PuzzleTuning/SSL_structures/Swin_Unet_main/test.py b/PuzzleTuning/SSL_structures/Swin_Unet_main/test.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c0535bf9d729a3fb9355d4cd4fcac18c99c31bc
--- /dev/null
+++ b/PuzzleTuning/SSL_structures/Swin_Unet_main/test.py
@@ -0,0 +1,141 @@
+import argparse
+import logging
+import os
+import random
+import sys
+import numpy as np
+import torch
+import torch.backends.cudnn as cudnn
+import torch.nn as nn
+from torch.utils.data import DataLoader
+from tqdm import tqdm
+from datasets.dataset_synapse import Synapse_dataset
+from utils import test_single_volume
+from networks.vision_transformer import SwinUnet as ViT_seg
+from trainer import trainer_synapse
+from config import get_config
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--volume_path', type=str,
+                    default='../data/Synapse/test_vol_h5', help='root dir for validation volume data')  # for acdc volume_path=root_dir
+parser.add_argument('--dataset', type=str,
+                    default='Synapse', help='experiment_name')
+parser.add_argument('--num_classes', type=int,
+                    default=9, help='output channel of network')
+parser.add_argument('--list_dir', type=str,
+                    default='./lists/lists_Synapse', help='list dir')
+parser.add_argument('--output_dir', type=str, help='output dir')
+parser.add_argument('--max_iterations', type=int, default=30000, help='maximum iteration number to train')
+parser.add_argument('--max_epochs', type=int, default=150, help='maximum epoch number to train')
+parser.add_argument('--batch_size', type=int, default=24,
+                    help='batch_size per gpu')
+parser.add_argument('--img_size', type=int, default=224, help='input patch size of network input')
+parser.add_argument('--is_savenii', action="store_true", help='whether to save results during inference')
+parser.add_argument('--test_save_dir', type=str, default='../predictions', help='directory for saving predictions as nii')
+parser.add_argument('--deterministic', type=int, default=1, help='whether to use deterministic training')
+parser.add_argument('--base_lr', type=float, default=0.01, help='segmentation network learning rate')
+parser.add_argument('--seed', type=int, default=1234, help='random seed')
+parser.add_argument('--cfg', type=str, required=True, metavar="FILE", help='path to config file')
+parser.add_argument(
+    "--opts",
+    help="Modify config options by adding 'KEY VALUE' pairs. 
", + default=None, + nargs='+', + ) +parser.add_argument('--zip', action='store_true', help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', help='Test throughput only') + +args = parser.parse_args() +if args.dataset == "Synapse": + args.volume_path = os.path.join(args.volume_path, "test_vol_h5") +config = get_config(args) + + +def inference(args, model, test_save_path=None): + db_test = args.Dataset(base_dir=args.volume_path, split="test_vol", list_dir=args.list_dir) + testloader = DataLoader(db_test, batch_size=1, shuffle=False, num_workers=1) + logging.info("{} test iterations per epoch".format(len(testloader))) + model.eval() + metric_list = 0.0 + for i_batch, sampled_batch in tqdm(enumerate(testloader)): + h, w = sampled_batch["image"].size()[2:] + image, label, case_name = sampled_batch["image"], sampled_batch["label"], sampled_batch['case_name'][0] + metric_i = test_single_volume(image, label, model, classes=args.num_classes, patch_size=[args.img_size, args.img_size], + test_save_path=test_save_path, case=case_name, z_spacing=args.z_spacing) + metric_list += np.array(metric_i) + logging.info('idx %d case %s mean_dice %f mean_hd95 %f' % (i_batch, case_name, np.mean(metric_i, axis=0)[0], np.mean(metric_i, axis=0)[1])) + metric_list = metric_list / len(db_test) + for i in range(1, args.num_classes): + logging.info('Mean class %d mean_dice %f mean_hd95 %f' % (i, metric_list[i-1][0], metric_list[i-1][1])) + performance = np.mean(metric_list, axis=0)[0] + mean_hd95 = np.mean(metric_list, axis=0)[1] + logging.info('Testing performance in best val model: mean_dice : %f mean_hd95 : %f' % (performance, mean_hd95)) + return "Testing Finished!" 
+
+
+if __name__ == "__main__":
+
+    if not args.deterministic:
+        cudnn.benchmark = True
+        cudnn.deterministic = False
+    else:
+        cudnn.benchmark = False
+        cudnn.deterministic = True
+    random.seed(args.seed)
+    np.random.seed(args.seed)
+    torch.manual_seed(args.seed)
+    torch.cuda.manual_seed(args.seed)
+
+    dataset_config = {
+        'Synapse': {
+            'Dataset': Synapse_dataset,
+            'volume_path': args.volume_path,
+            'list_dir': './lists/lists_Synapse',
+            'num_classes': 9,
+            'z_spacing': 1,
+        },
+    }
+    dataset_name = args.dataset
+    args.num_classes = dataset_config[dataset_name]['num_classes']
+    args.volume_path = dataset_config[dataset_name]['volume_path']
+    args.Dataset = dataset_config[dataset_name]['Dataset']
+    args.list_dir = dataset_config[dataset_name]['list_dir']
+    args.z_spacing = dataset_config[dataset_name]['z_spacing']
+    args.is_pretrain = True
+
+    net = ViT_seg(config, img_size=args.img_size, num_classes=args.num_classes).cuda()
+
+    snapshot = os.path.join(args.output_dir, 'best_model.pth')
+    if not os.path.exists(snapshot): snapshot = snapshot.replace('best_model', 'epoch_'+str(args.max_epochs-1))
+    msg = net.load_state_dict(torch.load(snapshot))
+    print("self trained swin unet", msg)
+    snapshot_name = snapshot.split('/')[-1]
+
+    log_folder = './test_log/test_log_'
+    os.makedirs(log_folder, exist_ok=True)
+    logging.basicConfig(filename=log_folder + '/' + snapshot_name + ".txt", level=logging.INFO, format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
+    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
+    logging.info(str(args))
+    logging.info(snapshot_name)
+
+    if args.is_savenii:
+        args.test_save_dir = os.path.join(args.output_dir, "predictions")
+        test_save_path = args.test_save_dir
+        os.makedirs(test_save_path, exist_ok=True)
+    else:
+        test_save_path = None
+    inference(args, net, test_save_path)
+
+
diff --git a/PuzzleTuning/SSL_structures/Swin_Unet_main/test.sh b/PuzzleTuning/SSL_structures/Swin_Unet_main/test.sh new file mode 100644 index 0000000000000000000000000000000000000000..2370296d36852a5707555ee9bfd648e11fcd777b --- /dev/null +++ b/PuzzleTuning/SSL_structures/Swin_Unet_main/test.sh @@ -0,0 +1,52 @@
+#!/bin/bash
+if [ -n "$epoch_time" ]
+then
+    EPOCH_TIME=$epoch_time
+else
+    EPOCH_TIME=150
+fi
+
+if [ -n "$out_dir" ]
+then
+    OUT_DIR=$out_dir
+else
+    OUT_DIR='./model_out'
+fi
+
+if [ -n "$cfg" ]
+then
+    CFG=$cfg
+else
+    CFG='configs/swin_tiny_patch4_window7_224_lite.yaml'
+fi
+
+if [ -n "$data_dir" ]
+then
+    DATA_DIR=$data_dir
+else
+    DATA_DIR='datasets/Synapse'
+fi
+
+if [ -n "$learning_rate" ]
+then
+    LEARNING_RATE=$learning_rate
+else
+    LEARNING_RATE=0.05
+fi
+
+if [ -n "$img_size" ]
+then
+    IMG_SIZE=$img_size
+else
+    IMG_SIZE=224
+fi
+
+if [ -n "$batch_size" ]
+then
+    BATCH_SIZE=$batch_size
+else
+    BATCH_SIZE=24
+fi
+
+echo "start test model"
+python test.py --dataset Synapse --cfg $CFG --is_savenii --volume_path $DATA_DIR --max_epochs $EPOCH_TIME --output_dir $OUT_DIR --img_size $IMG_SIZE --base_lr $LEARNING_RATE --batch_size $BATCH_SIZE
\ No newline at end of file
diff --git a/PuzzleTuning/SSL_structures/Swin_Unet_main/train.py b/PuzzleTuning/SSL_structures/Swin_Unet_main/train.py new file mode 100644 index 0000000000000000000000000000000000000000..56415d06b9bfe25ae4341c37615d4a51d18dc26d --- /dev/null +++ b/PuzzleTuning/SSL_structures/Swin_Unet_main/train.py @@ -0,0 +1,99 @@
+import argparse
+import logging
+import os
+import random
+import numpy as np
+import torch
+import torch.backends.cudnn as cudnn
+from networks.vision_transformer import SwinUnet as ViT_seg
+from 
trainer import trainer_synapse +from config import get_config + +parser = argparse.ArgumentParser() +parser.add_argument('--root_path', type=str, + default='../data/Synapse/train_npz', help='root dir for data') +parser.add_argument('--dataset', type=str, + default='Synapse', help='experiment_name') +parser.add_argument('--list_dir', type=str, + default='./lists/lists_Synapse', help='list dir') +parser.add_argument('--num_classes', type=int, + default=9, help='output channel of network') +parser.add_argument('--output_dir', type=str, help='output dir') +parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum epoch number to train') +parser.add_argument('--max_epochs', type=int, + default=150, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, + default=24, help='batch_size per gpu') +parser.add_argument('--n_gpu', type=int, default=1, help='total gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.01, + help='segmentation network learning rate') +parser.add_argument('--img_size', type=int, + default=224, help='input patch size of network input') +parser.add_argument('--seed', type=int, + default=1234, help='random seed') +parser.add_argument('--cfg', type=str, required=True, metavar="FILE", help='path to config file', ) +parser.add_argument( + "--opts", + help="Modify config options by adding 'KEY VALUE' pairs. ", + default=None, + nargs='+', + ) +parser.add_argument('--zip', action='store_true', help='use zipped dataset instead of folder dataset') +parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'], + help='no: no cache, ' + 'full: cache all data, ' + 'part: sharding the dataset into nonoverlapping pieces and only cache one piece') +parser.add_argument('--resume', help='resume from checkpoint') +parser.add_argument('--accumulation-steps', type=int, help="gradient accumulation steps") +parser.add_argument('--use-checkpoint', action='store_true', + help="whether to use gradient checkpointing to save memory") +parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'], + help='mixed precision opt level, if O0, no amp is used') +parser.add_argument('--tag', help='tag of experiment') +parser.add_argument('--eval', action='store_true', help='Perform evaluation only') +parser.add_argument('--throughput', action='store_true', help='Test throughput only') + +args = parser.parse_args() +if args.dataset == "Synapse": + args.root_path = os.path.join(args.root_path, "train_npz") +config = get_config(args) + + +if __name__ == "__main__": + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + + dataset_name = args.dataset + dataset_config = { + 'Synapse': { + 'root_path': args.root_path, + 'list_dir': './lists/lists_Synapse', + 'num_classes': 9, + }, + } + + if args.batch_size != 24 and args.batch_size % 6 == 0: + args.base_lr *= args.batch_size / 24 + args.num_classes = dataset_config[dataset_name]['num_classes'] + args.root_path = dataset_config[dataset_name]['root_path'] + args.list_dir = dataset_config[dataset_name]['list_dir'] + + if not os.path.exists(args.output_dir): + os.makedirs(args.output_dir) + net = ViT_seg(config, 
img_size=args.img_size, num_classes=args.num_classes).cuda()
+    if hasattr(net, 'load_from'): net.load_from(config)  # guard: load_from is commented out in networks/vision_transformer.py, so a bare call would raise AttributeError
+
+    trainer = {'Synapse': trainer_synapse,}
+    trainer[dataset_name](args, net, args.output_dir)
\ No newline at end of file
diff --git a/PuzzleTuning/SSL_structures/Swin_Unet_main/train.sh b/PuzzleTuning/SSL_structures/Swin_Unet_main/train.sh new file mode 100644 index 0000000000000000000000000000000000000000..f0f967beb4d3e2373bee9f025d25dc7f23894bf2 --- /dev/null +++ b/PuzzleTuning/SSL_structures/Swin_Unet_main/train.sh @@ -0,0 +1,52 @@
+#!/bin/bash
+if [ -n "$epoch_time" ]
+then
+    EPOCH_TIME=$epoch_time
+else
+    EPOCH_TIME=150
+fi
+
+if [ -n "$out_dir" ]
+then
+    OUT_DIR=$out_dir
+else
+    OUT_DIR='./model_out'
+fi
+
+if [ -n "$cfg" ]
+then
+    CFG=$cfg
+else
+    CFG='configs/swin_tiny_patch4_window7_224_lite.yaml'
+fi
+
+if [ -n "$data_dir" ]
+then
+    DATA_DIR=$data_dir
+else
+    DATA_DIR='datasets/Synapse'
+fi
+
+if [ -n "$learning_rate" ]
+then
+    LEARNING_RATE=$learning_rate
+else
+    LEARNING_RATE=0.05
+fi
+
+if [ -n "$img_size" ]
+then
+    IMG_SIZE=$img_size
+else
+    IMG_SIZE=224
+fi
+
+if [ -n "$batch_size" ]
+then
+    BATCH_SIZE=$batch_size
+else
+    BATCH_SIZE=24
+fi
+
+echo "start train model"
+python train.py --dataset Synapse --cfg $CFG --root_path $DATA_DIR --max_epochs $EPOCH_TIME --output_dir $OUT_DIR --img_size $IMG_SIZE --base_lr $LEARNING_RATE --batch_size $BATCH_SIZE
\ No newline at end of file
diff --git a/PuzzleTuning/SSL_structures/Swin_Unet_main/trainer.py b/PuzzleTuning/SSL_structures/Swin_Unet_main/trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..658a6a2708c1ba856b4f40545da0deba47f879e6 --- /dev/null +++ b/PuzzleTuning/SSL_structures/Swin_Unet_main/trainer.py @@ -0,0 +1,97 @@
+import argparse
+import logging
+import os
+import random
+import sys
+import time
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.optim as optim
+from tensorboardX import SummaryWriter
+from torch.nn.modules.loss import CrossEntropyLoss
+from torch.utils.data import DataLoader
+from tqdm import tqdm
+from utils import DiceLoss
+from torchvision import transforms
+from utils import test_single_volume
+
+def trainer_synapse(args, model, snapshot_path):
+    from datasets.dataset_synapse import Synapse_dataset, RandomGenerator
+    logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO,
+                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
+    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
+    logging.info(str(args))
+    base_lr = args.base_lr
+    num_classes = args.num_classes
+    batch_size = args.batch_size * args.n_gpu
+    # max_iterations = args.max_iterations
+    db_train = Synapse_dataset(base_dir=args.root_path, list_dir=args.list_dir, split="train",
+                               transform=transforms.Compose(
+                                   [RandomGenerator(output_size=[args.img_size, args.img_size])]))
+    print("The length of train set is: {}".format(len(db_train)))
+
+    def worker_init_fn(worker_id):
+        random.seed(args.seed + worker_id)
+
+    trainloader = DataLoader(db_train, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True,
+                             worker_init_fn=worker_init_fn)
+    if args.n_gpu > 1:
+        model = nn.DataParallel(model)
+    model.train()
+    ce_loss = CrossEntropyLoss()
+    dice_loss = DiceLoss(num_classes)
+    optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=0.0001)
+    writer = SummaryWriter(snapshot_path + '/log')
+    iter_num = 0
+    max_epoch = args.max_epochs
+    max_iterations = args.max_epochs * len(trainloader)  # max_epoch = max_iterations // len(trainloader) + 1
+    
logging.info("{} iterations per epoch. {} max iterations ".format(len(trainloader), max_iterations)) + best_performance = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + for i_batch, sampled_batch in enumerate(trainloader): + image_batch, label_batch = sampled_batch['image'], sampled_batch['label'] + image_batch, label_batch = image_batch.cuda(), label_batch.cuda() + outputs = model(image_batch) + loss_ce = ce_loss(outputs, label_batch[:].long()) + loss_dice = dice_loss(outputs, label_batch, softmax=True) + loss = 0.4 * loss_ce + 0.6 * loss_dice + optimizer.zero_grad() + loss.backward() + optimizer.step() + lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 + for param_group in optimizer.param_groups: + param_group['lr'] = lr_ + + iter_num = iter_num + 1 + writer.add_scalar('info/lr', lr_, iter_num) + writer.add_scalar('info/total_loss', loss, iter_num) + writer.add_scalar('info/loss_ce', loss_ce, iter_num) + + logging.info('iteration %d : loss : %f, loss_ce: %f' % (iter_num, loss.item(), loss_ce.item())) + + if iter_num % 20 == 0: + image = image_batch[1, 0:1, :, :] + image = (image - image.min()) / (image.max() - image.min()) + writer.add_image('train/Image', image, iter_num) + outputs = torch.argmax(torch.softmax(outputs, dim=1), dim=1, keepdim=True) + writer.add_image('train/Prediction', outputs[1, ...] * 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + save_interval = 50 # int(max_epoch/6) + if epoch_num > int(max_epoch / 2) and (epoch_num + 1) % save_interval == 0: + save_mode_path = os.path.join(snapshot_path, 'epoch_' + str(epoch_num) + '.pth') + torch.save(model.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if epoch_num >= max_epoch - 1: + save_mode_path = os.path.join(snapshot_path, 'epoch_' + str(epoch_num) + '.pth') + torch.save(model.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + iterator.close() + break + + writer.close() + return "Training Finished!" 
\ No newline at end of file diff --git a/PuzzleTuning/SSL_structures/Swin_Unet_main/utils.py b/PuzzleTuning/SSL_structures/Swin_Unet_main/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0e3a1bf9ad7058f506faff0a8ae4f6056514575c --- /dev/null +++ b/PuzzleTuning/SSL_structures/Swin_Unet_main/utils.py @@ -0,0 +1,102 @@ +import numpy as np +import torch +from medpy import metric +from scipy.ndimage import zoom +import torch.nn as nn +import SimpleITK as sitk + + +class DiceLoss(nn.Module): + def __init__(self, n_classes): + super(DiceLoss, self).__init__() + self.n_classes = n_classes + + def _one_hot_encoder(self, input_tensor): + tensor_list = [] + for i in range(self.n_classes): + temp_prob = input_tensor == i # * torch.ones_like(input_tensor) + tensor_list.append(temp_prob.unsqueeze(1)) + output_tensor = torch.cat(tensor_list, dim=1) + return output_tensor.float() + + def _dice_loss(self, score, target): + target = target.float() + smooth = 1e-5 + intersect = torch.sum(score * target) + y_sum = torch.sum(target * target) + z_sum = torch.sum(score * score) + loss = (2 * intersect + smooth) / (z_sum + y_sum + smooth) + loss = 1 - loss + return loss + + def forward(self, inputs, target, weight=None, softmax=False): + if softmax: + inputs = torch.softmax(inputs, dim=1) + target = self._one_hot_encoder(target) + if weight is None: + weight = [1] * self.n_classes + assert inputs.size() == target.size(), 'predict {} & target {} shape do not match'.format(inputs.size(), target.size()) + class_wise_dice = [] + loss = 0.0 + for i in range(0, self.n_classes): + dice = self._dice_loss(inputs[:, i], target[:, i]) + class_wise_dice.append(1.0 - dice.item()) + loss += dice * weight[i] + return loss / self.n_classes + + +def calculate_metric_percase(pred, gt): + pred[pred > 0] = 1 + gt[gt > 0] = 1 + if pred.sum() > 0 and gt.sum()>0: + dice = metric.binary.dc(pred, gt) + hd95 = metric.binary.hd95(pred, gt) + return dice, hd95 + elif pred.sum() > 0 and gt.sum()==0: + return 1, 0 + else: + return 0, 0 + + +def test_single_volume(image, label, net, classes, patch_size=[256, 256], test_save_path=None, case=None, z_spacing=1): + image, label = image.squeeze(0).cpu().detach().numpy(), label.squeeze(0).cpu().detach().numpy() + if len(image.shape) == 3: + prediction = np.zeros_like(label) + for ind in range(image.shape[0]): + slice = image[ind, :, :] + x, y = slice.shape[0], slice.shape[1] + if x != patch_size[0] or y != patch_size[1]: + slice = zoom(slice, (patch_size[0] / x, patch_size[1] / y), order=3) # previous using 0 + input = torch.from_numpy(slice).unsqueeze(0).unsqueeze(0).float().cuda() + net.eval() + with torch.no_grad(): + outputs = net(input) + out = torch.argmax(torch.softmax(outputs, dim=1), dim=1).squeeze(0) + out = out.cpu().detach().numpy() + if x != patch_size[0] or y != patch_size[1]: + pred = zoom(out, (x / patch_size[0], y / patch_size[1]), order=0) + else: + pred = out + prediction[ind] = pred + else: + input = torch.from_numpy(image).unsqueeze( + 0).unsqueeze(0).float().cuda() + net.eval() + with torch.no_grad(): + out = torch.argmax(torch.softmax(net(input), dim=1), dim=1).squeeze(0) + prediction = out.cpu().detach().numpy() + metric_list = [] + for i in range(1, classes): + metric_list.append(calculate_metric_percase(prediction == i, label == i)) + + if test_save_path is not None: + img_itk = sitk.GetImageFromArray(image.astype(np.float32)) + prd_itk = sitk.GetImageFromArray(prediction.astype(np.float32)) + lab_itk = 
sitk.GetImageFromArray(label.astype(np.float32)) + img_itk.SetSpacing((1, 1, z_spacing)) + prd_itk.SetSpacing((1, 1, z_spacing)) + lab_itk.SetSpacing((1, 1, z_spacing)) + sitk.WriteImage(prd_itk, test_save_path + '/'+case + "_pred.nii.gz") + sitk.WriteImage(img_itk, test_save_path + '/'+ case + "_img.nii.gz") + sitk.WriteImage(lab_itk, test_save_path + '/'+ case + "_gt.nii.gz") + return metric_list \ No newline at end of file diff --git a/PuzzleTuning/SSL_structures/TransMUNet_main/TransMUNet.py b/PuzzleTuning/SSL_structures/TransMUNet_main/TransMUNet.py new file mode 100644 index 0000000000000000000000000000000000000000..fe56bce811ee0433a92602ad882358cf3234b33b --- /dev/null +++ b/PuzzleTuning/SSL_structures/TransMUNet_main/TransMUNet.py @@ -0,0 +1,189 @@ +import torch +import torch.nn as nn +import torchvision + +resnet = torchvision.models.resnet.resnet50(pretrained=True) +from .munet_transformer import transmunet +import cv2 +import numpy as np + + +class ConvBlock(nn.Module): + """ + Helper module that consists of a Conv -> BN -> ReLU + """ + + def __init__(self, in_channels, out_channels, padding=1, kernel_size=3, stride=1, with_nonlinearity=True): + super().__init__() + self.conv = nn.Conv2d(in_channels, out_channels, padding=padding, kernel_size=kernel_size, stride=stride) + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + self.with_nonlinearity = with_nonlinearity + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + if self.with_nonlinearity: + x = self.relu(x) + return x + + +class Bridge(nn.Module): + """ + This is the middle layer of the UNet which just consists of some + """ + + def __init__(self, in_channels, out_channels): + super().__init__() + self.bridge = nn.Sequential( + ConvBlock(in_channels, out_channels), + ConvBlock(out_channels, out_channels) + ) + + def forward(self, x): + return self.bridge(x) + + +class UpBlockForUNetWithResNet50(nn.Module): + """ + Up block that encapsulates one up-sampling step which consists of Upsample -> ConvBlock -> ConvBlock + """ + + def __init__(self, in_channels, out_channels, up_conv_in_channels=None, up_conv_out_channels=None, + upsampling_method="conv_transpose"): + super().__init__() + + if up_conv_in_channels == None: + up_conv_in_channels = in_channels + if up_conv_out_channels == None: + up_conv_out_channels = out_channels + + if upsampling_method == "conv_transpose": + self.upsample = nn.ConvTranspose2d(up_conv_in_channels, up_conv_out_channels, kernel_size=2, stride=2) + elif upsampling_method == "bilinear": + self.upsample = nn.Sequential( + nn.Upsample(mode='bilinear', scale_factor=2), + nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1) + ) + self.conv_block_1 = ConvBlock(in_channels, out_channels) + self.conv_block_2 = ConvBlock(out_channels, out_channels) + + def forward(self, up_x, down_x): + """ + + :param up_x: this is the output from the previous up block + :param down_x: this is the output from the down block + :return: upsampled feature map + """ + x = self.upsample(up_x) + x = torch.cat([x, down_x], 1) + x = self.conv_block_1(x) + x = self.conv_block_2(x) + return x + + +class SE_Block(nn.Module): + def __init__(self, c, r=16): + super().__init__() + self.squeeze = nn.AdaptiveAvgPool2d(1) + self.excitation = nn.Sequential( + nn.Linear(c, c // r, bias=False), + nn.ReLU(inplace=True), + nn.Linear(c // r, c, bias=False), + nn.Sigmoid() + ) + + def forward(self, x): + bs, c, _, _ = x.shape + y = self.squeeze(x).view(bs, c) + y = self.excitation(y).view(bs, c, 1, 1) + x = x * 
y.expand_as(x)  # recalibrate the feature map channel-wise
+        return x  # fixed: return the recalibrated features; the original returned the (bs, c, 1, 1) weights y, discarding the scaled map
+
+
+class TransMUNet(nn.Module):
+    DEPTH = 6
+
+    def __init__(self, n_classes=2,
+                 patch_size: int = 16,
+                 emb_size: int = 512,
+                 img_size: int = 256,
+                 n_channels=3,
+                 depth: int = 4,
+                 n_regions: int = (256 // 16) ** 2,
+                 output_ch: int = 1,
+                 bilinear=True):
+        super().__init__()
+        self.n_classes = n_classes
+        self.transformer = transmunet(in_channels=n_channels,
+                                      patch_size=patch_size,
+                                      emb_size=emb_size,
+                                      img_size=img_size,
+                                      depth=depth,
+                                      n_regions=n_regions)
+        resnet = torchvision.models.resnet.resnet50(pretrained=True)
+        down_blocks = []
+        up_blocks = []
+        self.input_block = nn.Sequential(*list(resnet.children()))[:3]
+        self.input_pool = list(resnet.children())[3]
+        for bottleneck in list(resnet.children()):
+            if isinstance(bottleneck, nn.Sequential):
+                down_blocks.append(bottleneck)
+        self.down_blocks = nn.ModuleList(down_blocks)
+        self.bridge = Bridge(2048, 2048)
+        up_blocks.append(UpBlockForUNetWithResNet50(2048, 1024))
+        up_blocks.append(UpBlockForUNetWithResNet50(1024, 512))
+        up_blocks.append(UpBlockForUNetWithResNet50(512, 256))
+        up_blocks.append(UpBlockForUNetWithResNet50(in_channels=128 + 64, out_channels=128,
+                                                    up_conv_in_channels=256, up_conv_out_channels=128))
+        up_blocks.append(UpBlockForUNetWithResNet50(in_channels=64 + 3, out_channels=64,
+                                                    up_conv_in_channels=128, up_conv_out_channels=64))
+
+        self.up_blocks = nn.ModuleList(up_blocks)
+
+        self.out = nn.Conv2d(128, n_classes, kernel_size=1, stride=1)
+
+        self.boundary = nn.Sequential(nn.Conv2d(64, 32, kernel_size=1, stride=1),
+                                      nn.BatchNorm2d(32), nn.ReLU(inplace=True),
+                                      nn.Conv2d(32, 1, kernel_size=1, stride=1, bias=False),
+                                      nn.Sigmoid())
+
+        self.se = SE_Block(c=64)
+
+    def forward(self, x, with_additional=False):
+        [global_contexual, regional_distribution, region_coeff] = self.transformer(x)
+
+        pre_pools = dict()
+        pre_pools[f"layer_0"] = x
+        x = self.input_block(x)
+        pre_pools[f"layer_1"] = x
+        x = self.input_pool(x)
+
+        for i, block in enumerate(self.down_blocks, 2):
+            x = block(x)
+            if i == (TransMUNet.DEPTH - 1):
+                continue
+            pre_pools[f"layer_{i}"] = x
+
+        x = self.bridge(x)
+
+        for i, block in enumerate(self.up_blocks, 1):
+            key = f"layer_{TransMUNet.DEPTH - 1 - i}"
+            x = block(x, pre_pools[key])
+
+        B_out = self.boundary(x)
+        B = B_out.repeat_interleave(int(x.shape[1]), dim=1)
+        x = self.se(x)
+        x = x + B
+        att = regional_distribution.repeat_interleave(int(x.shape[1]), dim=1)
+        x = x * att
+        x = torch.cat((x, global_contexual), dim=1)
+        x = self.out(x)
+        # print(x.shape)
+        del pre_pools
+        x = torch.sigmoid(x)
+        # print('x shape: ', x.shape)
+        if with_additional:
+            return x, B_out, region_coeff
+        else:
+            return x
diff --git a/PuzzleTuning/SSL_structures/TransMUNet_main/munet_transformer.py b/PuzzleTuning/SSL_structures/TransMUNet_main/munet_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..49345b3ed27e52f0999519beb44a8e0189b67d65 --- /dev/null +++ b/PuzzleTuning/SSL_structures/TransMUNet_main/munet_transformer.py @@ -0,0 +1,219 @@
+import torch
+import torch.nn.functional as F
+from torch import nn
+from torch import Tensor
+from einops import rearrange, reduce, repeat
+from einops.layers.torch import Rearrange, Reduce
+
+
+class DoubleConv(nn.Module):
+    def __init__(self, in_channels, out_channels, mid_channels=None):
+        super().__init__()
+        if not mid_channels:
+            mid_channels = out_channels
+        self.double_conv = nn.Sequential(
+            nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1),
+            nn.BatchNorm2d(mid_channels),
+            
nn.ReLU(inplace=True),
+            nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1),
+            nn.BatchNorm2d(out_channels),
+            nn.ReLU(inplace=True),
+            nn.MaxPool2d(2)
+        )
+
+    def forward(self, x):
+        return self.double_conv(x)
+
+
+class Encoder_patch(nn.Module):
+    def __init__(self, n_channels, emb_size=512, bilinear=True):
+        super(Encoder_patch, self).__init__()
+        self.n_channels = n_channels
+        self.emb_size = emb_size
+        self.bilinear = bilinear
+
+        self.conv1 = DoubleConv(n_channels, 128)
+        self.conv2 = DoubleConv(128, 256)
+        self.conv3 = DoubleConv(256, emb_size)
+
+    def forward(self, x):
+        x = self.conv1(x)
+        x = self.conv2(x)
+        x = self.conv3(x)
+        x = torch.flatten(torch.nn.functional.adaptive_avg_pool2d(x, 1), start_dim=1)
+        return x
+
+
+class PatchEmbedding(nn.Module):
+    def __init__(self, in_channels: int = 3, patch_size: int = 16, emb_size: int = 768, img_size: int = 224):
+        self.patch_size = patch_size
+        super().__init__()
+        # self.encoder = Encoder_patch(n_channels = in_channels, emb_size= emb_size)
+        self.projection = nn.Sequential(
+            Rearrange('b c (ph h) (pw w) -> b c (ph pw) h w', c=in_channels, h=patch_size, ph=img_size // patch_size,
+                      w=patch_size, pw=img_size // patch_size),
+            Rearrange('b c p h w -> (b p) c h w'),
+            Encoder_patch(n_channels=in_channels, emb_size=emb_size),
+            Rearrange('(b p) d-> b p d', p=(img_size // patch_size) ** 2),
+        )
+        self.cls_token = nn.Parameter(torch.randn(1, 1, emb_size))
+        self.positions = nn.Parameter(torch.randn((img_size // patch_size) ** 2 + 1, emb_size))
+
+    def forward(self, x: Tensor) -> Tensor:
+        b, _, _, _ = x.shape
+        x = self.projection(x)
+        cls_tokens = repeat(self.cls_token, '() n e -> b n e', b=b)
+        # prepend the cls token to the input
+        x = torch.cat([cls_tokens, x], dim=1)
+        # add position embedding
+        x += self.positions
+        return x
+
+
+class MultiHeadAttention(nn.Module):
+    def __init__(self, emb_size: int = 768, num_heads: int = 8, dropout: float = 0):
+        super().__init__()
+        self.emb_size = emb_size
+        self.num_heads = num_heads
+        # fuse the queries, keys and values in one matrix
+        self.qkv = nn.Linear(emb_size, emb_size * 3)
+        self.att_drop = nn.Dropout(dropout)
+        self.projection = nn.Linear(emb_size, emb_size)
+
+    def forward(self, x: Tensor, mask: Tensor = None) -> Tensor:
+        # split keys, queries and values in num_heads
+        qkv = rearrange(self.qkv(x), "b n (h d qkv) -> (qkv) b h n d", h=self.num_heads, qkv=3)
+        queries, keys, values = qkv[0], qkv[1], qkv[2]
+        # sum up over the last axis
+        energy = torch.einsum('bhqd, bhkd -> bhqk', queries, keys)  # batch, num_heads, query_len, key_len
+        if mask is not None:
+            fill_value = torch.finfo(torch.float32).min
+            energy = energy.masked_fill(~mask, fill_value)  # fixed: Tensor has no mask_fill method; use masked_fill and keep the result
+
+        scaling = self.emb_size ** (1 / 2)
+        att = F.softmax(energy / scaling, dim=-1)  # fixed: scale the logits before the softmax rather than dividing the probabilities afterwards
+        att = self.att_drop(att)
+        # sum up over the third axis
+        out = torch.einsum('bhal, bhlv -> bhav ', att, values)
+        out = rearrange(out, "b h n d -> b n (h d)")
+        out = self.projection(out)
+        return out
+
+
+class ResidualAdd(nn.Module):
+    def __init__(self, fn):
+        super().__init__()
+        self.fn = fn
+
+    def forward(self, x, **kwargs):
+        res = x
+        x = self.fn(x, **kwargs)
+        x += res
+        return x
+
+
+class FeedForwardBlock(nn.Sequential):
+    def __init__(self, emb_size: int, expansion: int = 4, drop_p: float = 0.):
+        super().__init__(
+            nn.Linear(emb_size, expansion * emb_size),
+            nn.GELU(),
+            nn.Dropout(drop_p),
+            nn.Linear(expansion * emb_size, emb_size),
+        )
+
+
+class TransformerEncoderBlock(nn.Sequential):
+    def __init__(self,
+                 emb_size: int = 768,
+                 
drop_p: float = 0., + forward_expansion: int = 4, + forward_drop_p: float = 0., + **kwargs): + super().__init__( + ResidualAdd(nn.Sequential( + nn.LayerNorm(emb_size), + MultiHeadAttention(emb_size, **kwargs), + nn.Dropout(drop_p) + )), + ResidualAdd(nn.Sequential( + nn.LayerNorm(emb_size), + FeedForwardBlock( + emb_size, expansion=forward_expansion, drop_p=forward_drop_p), + nn.Dropout(drop_p) + ) + )) + + +class TransformerEncoder(nn.Sequential): + def __init__(self, depth: int = 12, **kwargs): + super().__init__(*[TransformerEncoderBlock(**kwargs) for _ in range(depth)]) + + +class dependencymap(nn.Sequential): + def __init__(self, emb_size: int = 768, n_regions: int = 256, patch_size: int = 16, img_size: int = 256, + output_ch: int = 64, cuda=True): + super().__init__() + self.patch_size = patch_size + self.img_size = img_size + self.emb_size = emb_size + self.output_ch = output_ch + self.cuda = cuda + self.outconv = nn.Sequential( + nn.Conv2d(emb_size, output_ch, kernel_size=1, padding=0), + nn.BatchNorm2d(output_ch), + nn.Sigmoid() + ) + self.out2 = nn.Sigmoid() + + self.gpool = nn.AdaptiveAvgPool1d(1) + + def forward(self, x): + x_gpool = self.gpool(x) + coeff = torch.zeros((x.size()[0], self.emb_size, self.img_size, self.img_size)) + coeff2 = torch.zeros((x.size()[0], 1, self.img_size, self.img_size)) + if self.cuda: + coeff = coeff.cuda() + coeff2 = coeff2.cuda() + for i in range(0, self.img_size // self.patch_size): + for j in range(0, self.img_size // self.patch_size): + value = x[:, (i * self.patch_size) + j] + value = value.view(value.size()[0], value.size()[1], 1, 1) + coeff[:, :, self.patch_size * i:self.patch_size * (i + 1), + self.patch_size * j:self.patch_size * (j + 1)] = value.repeat(1, 1, self.patch_size, self.patch_size) + + value = x_gpool[:, (i * self.patch_size) + j] + value = value.view(value.size()[0], value.size()[1], 1, 1) + coeff2[:, :, self.patch_size * i:self.patch_size * (i + 1), + self.patch_size * j:self.patch_size * (j + 1)] = value.repeat(1, 1, self.patch_size, self.patch_size) + + global_contexual = self.outconv(coeff) + regional_distribution = self.out2(coeff2) + return [global_contexual, regional_distribution, self.out2(x_gpool)] + + +class transmunet(nn.Sequential): + def __init__(self, + in_channels: int = 3, + patch_size: int = 16, + emb_size: int = 1024, + img_size: int = 256, + depth: int = 2, + n_regions: int = (256 // 16) ** 2, + output_ch: int = 64, + cuda=True, + **kwargs): + super().__init__( + PatchEmbedding(in_channels, patch_size, emb_size, img_size), + TransformerEncoder(depth, emb_size=emb_size, **kwargs), + dependencymap(emb_size, n_regions, patch_size, img_size, output_ch, cuda) + ) + + + + + + + + + + diff --git a/PuzzleTuning/SSL_structures/TransUNet_main/LICENSE b/PuzzleTuning/SSL_structures/TransUNet_main/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 --- /dev/null +++ b/PuzzleTuning/SSL_structures/TransUNet_main/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/PuzzleTuning/SSL_structures/TransUNet_main/README.md b/PuzzleTuning/SSL_structures/TransUNet_main/README.md new file mode 100644 index 0000000000000000000000000000000000000000..275d10ba506a159b8142da71265426aa81f532b2 --- /dev/null +++ b/PuzzleTuning/SSL_structures/TransUNet_main/README.md @@ -0,0 +1,50 @@ +# TransUNet +This repo holds code for [TransUNet: Transformers Make Strong Encoders for Medical Image Segmentation](https://arxiv.org/pdf/2102.04306.pdf) + +## Usage + +### 1. Download Google pre-trained ViT models +* [Get models in this link](https://console.cloud.google.com/storage/vit_models/): R50-ViT-B_16, ViT-B_16, ViT-L_16... +```bash +wget https://storage.googleapis.com/vit_models/imagenet21k/{MODEL_NAME}.npz && +mkdir ../model/vit_checkpoint/imagenet21k && +mv {MODEL_NAME}.npz ../model/vit_checkpoint/imagenet21k/{MODEL_NAME}.npz +``` + +### 2. Prepare data + +Please go to ["./datasets/README.md"](datasets/README.md) for details, or please send an Email to jienengchen01 AT gmail.com to request the preprocessed data. If you would like to use the preprocessed data, please use it for research purposes and do not redistribute it. + +### 3. Environment + +Please prepare an environment with python=3.7, and then use the command "pip install -r requirements.txt" for the dependencies. + +### 4. Train/Test + +- Run the train script on synapse dataset. The batch size can be reduced to 12 or 6 to save memory (please also decrease the base_lr linearly), and both can reach similar performance. + +```bash +CUDA_VISIBLE_DEVICES=0 python train.py --dataset Synapse --vit_name R50-ViT-B_16 +``` + +- Run the test script on synapse dataset. It supports testing for both 2D images and 3D volumes. + +```bash +python test.py --dataset Synapse --vit_name R50-ViT-B_16 +``` + +## Reference +* [Google ViT](https://github.com/google-research/vision_transformer) +* [ViT-pytorch](https://github.com/jeonsworld/ViT-pytorch) +* [segmentation_models.pytorch](https://github.com/qubvel/segmentation_models.pytorch) + +## Citations + +```bibtex +@article{chen2021transunet, + title={TransUNet: Transformers Make Strong Encoders for Medical Image Segmentation}, + author={Chen, Jieneng and Lu, Yongyi and Yu, Qihang and Luo, Xiangde and Adeli, Ehsan and Wang, Yan and Lu, Le and Yuille, Alan L., and Zhou, Yuyin}, + journal={arXiv preprint arXiv:2102.04306}, + year={2021} +} +``` diff --git a/PuzzleTuning/SSL_structures/TransUNet_main/datasets/README.md b/PuzzleTuning/SSL_structures/TransUNet_main/datasets/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c662f8e2c24f9b5a899338cfe5fcd67f43d1bba5 --- /dev/null +++ b/PuzzleTuning/SSL_structures/TransUNet_main/datasets/README.md @@ -0,0 +1,29 @@ +# Data Preparing + +1. Access to the synapse multi-organ dataset: + 1. Sign up in the [official Synapse website](https://www.synapse.org/#!Synapse:syn3193805/wiki/) and download the dataset. Convert them to numpy format, clip the images within [-125, 275], normalize each 3D image to [0, 1], and extract 2D slices from 3D volume for training cases while keeping the 3D volume in h5 format for testing cases. + 2. You can also send an Email directly to jienengchen01 AT gmail.com to request the preprocessed data for reproduction. +2. The directory structure of the whole project is as follows: + +```bash +. +├── TransUNet +│   ├──datasets +│   │    └── dataset_*.py +│   ├──train.py +│   ├──test.py +│   └──... 
+├── model +│   └── vit_checkpoint +│   └── imagenet21k +│      ├── R50+ViT-B_16.npz +│      └── *.npz +└── data + └──Synapse + ├── test_vol_h5 + │   ├── case0001.npy.h5 + │   └── *.npy.h5 + └── train_npz + ├── case0005_slice000.npz + └── *.npz +``` diff --git a/PuzzleTuning/SSL_structures/TransUNet_main/datasets/dataset_synapse.py b/PuzzleTuning/SSL_structures/TransUNet_main/datasets/dataset_synapse.py new file mode 100644 index 0000000000000000000000000000000000000000..c5d0de1a99f8ca46851f51e45570d4ddc8fbff09 --- /dev/null +++ b/PuzzleTuning/SSL_structures/TransUNet_main/datasets/dataset_synapse.py @@ -0,0 +1,75 @@ +import os +import random +import h5py +import numpy as np +import torch +from scipy import ndimage +from scipy.ndimage.interpolation import zoom +from torch.utils.data import Dataset + + +def random_rot_flip(image, label): + k = np.random.randint(0, 4) + image = np.rot90(image, k) + label = np.rot90(label, k) + axis = np.random.randint(0, 2) + image = np.flip(image, axis=axis).copy() + label = np.flip(label, axis=axis).copy() + return image, label + + +def random_rotate(image, label): + angle = np.random.randint(-20, 20) + image = ndimage.rotate(image, angle, order=0, reshape=False) + label = ndimage.rotate(label, angle, order=0, reshape=False) + return image, label + + +class RandomGenerator(object): + def __init__(self, output_size): + self.output_size = output_size + + def __call__(self, sample): + image, label = sample['image'], sample['label'] + + if random.random() > 0.5: + image, label = random_rot_flip(image, label) + elif random.random() > 0.5: + image, label = random_rotate(image, label) + x, y = image.shape + if x != self.output_size[0] or y != self.output_size[1]: + image = zoom(image, (self.output_size[0] / x, self.output_size[1] / y), order=3) # why not 3? + label = zoom(label, (self.output_size[0] / x, self.output_size[1] / y), order=0) + image = torch.from_numpy(image.astype(np.float32)).unsqueeze(0) + label = torch.from_numpy(label.astype(np.float32)) + sample = {'image': image, 'label': label.long()} + return sample + + +class Synapse_dataset(Dataset): + def __init__(self, base_dir, list_dir, split, transform=None): + self.transform = transform # using transform in torch! 
+ self.split = split + self.sample_list = open(os.path.join(list_dir, self.split+'.txt')).readlines() + self.data_dir = base_dir + + def __len__(self): + return len(self.sample_list) + + def __getitem__(self, idx): + if self.split == "train": + slice_name = self.sample_list[idx].strip('\n') + data_path = os.path.join(self.data_dir, slice_name+'.npz') + data = np.load(data_path) + image, label = data['image'], data['label'] + else: + vol_name = self.sample_list[idx].strip('\n') + filepath = self.data_dir + "/{}.npy.h5".format(vol_name) + data = h5py.File(filepath) + image, label = data['image'][:], data['label'][:] + + sample = {'image': image, 'label': label} + if self.transform: + sample = self.transform(sample) + sample['case_name'] = self.sample_list[idx].strip('\n') + return sample diff --git a/PuzzleTuning/SSL_structures/TransUNet_main/lists/lists_Synapse/all.lst b/PuzzleTuning/SSL_structures/TransUNet_main/lists/lists_Synapse/all.lst new file mode 100644 index 0000000000000000000000000000000000000000..6ef047d4b8be2ea61d1621620e420a6f3c974ec2 --- /dev/null +++ b/PuzzleTuning/SSL_structures/TransUNet_main/lists/lists_Synapse/all.lst @@ -0,0 +1,30 @@ +case0031.npy.h5 +case0007.npy.h5 +case0009.npy.h5 +case0005.npy.h5 +case0026.npy.h5 +case0039.npy.h5 +case0024.npy.h5 +case0034.npy.h5 +case0033.npy.h5 +case0030.npy.h5 +case0023.npy.h5 +case0040.npy.h5 +case0010.npy.h5 +case0021.npy.h5 +case0006.npy.h5 +case0027.npy.h5 +case0028.npy.h5 +case0037.npy.h5 +case0008.npy.h5 +case0022.npy.h5 +case0038.npy.h5 +case0036.npy.h5 +case0032.npy.h5 +case0002.npy.h5 +case0029.npy.h5 +case0003.npy.h5 +case0001.npy.h5 +case0004.npy.h5 +case0025.npy.h5 +case0035.npy.h5 diff --git a/PuzzleTuning/SSL_structures/TransUNet_main/lists/lists_Synapse/test_vol.txt b/PuzzleTuning/SSL_structures/TransUNet_main/lists/lists_Synapse/test_vol.txt new file mode 100644 index 0000000000000000000000000000000000000000..1c4abd53044eed5457fd1f7e0cca1c99e7222593 --- /dev/null +++ b/PuzzleTuning/SSL_structures/TransUNet_main/lists/lists_Synapse/test_vol.txt @@ -0,0 +1,12 @@ +case0008 +case0022 +case0038 +case0036 +case0032 +case0002 +case0029 +case0003 +case0001 +case0004 +case0025 +case0035 diff --git a/PuzzleTuning/SSL_structures/TransUNet_main/lists/lists_Synapse/train.txt b/PuzzleTuning/SSL_structures/TransUNet_main/lists/lists_Synapse/train.txt new file mode 100644 index 0000000000000000000000000000000000000000..e58616844994a95407d1f664b79cd4e4533d41b8 --- /dev/null +++ b/PuzzleTuning/SSL_structures/TransUNet_main/lists/lists_Synapse/train.txt @@ -0,0 +1,2211 @@ +case0031_slice000 +case0031_slice001 +case0031_slice002 +case0031_slice003 +case0031_slice004 +case0031_slice005 +case0031_slice006 +case0031_slice007 +case0031_slice008 +case0031_slice009 +case0031_slice010 +case0031_slice011 +case0031_slice012 +case0031_slice013 +case0031_slice014 +case0031_slice015 +case0031_slice016 +case0031_slice017 +case0031_slice018 +case0031_slice019 +case0031_slice020 +case0031_slice021 +case0031_slice022 +case0031_slice023 +case0031_slice024 +case0031_slice025 +case0031_slice026 +case0031_slice027 +case0031_slice028 +case0031_slice029 +case0031_slice030 +case0031_slice031 +case0031_slice032 +case0031_slice033 +case0031_slice034 +case0031_slice035 +case0031_slice036 +case0031_slice037 +case0031_slice038 +case0031_slice039 +case0031_slice040 +case0031_slice041 +case0031_slice042 +case0031_slice043 +case0031_slice044 +case0031_slice045 +case0031_slice046 +case0031_slice047 +case0031_slice048 +case0031_slice049 +case0031_slice050 
+case0031_slice051 +case0031_slice052 +case0031_slice053 +case0031_slice054 +case0031_slice055 +case0031_slice056 +case0031_slice057 +case0031_slice058 +case0031_slice059 +case0031_slice060 +case0031_slice061 +case0031_slice062 +case0031_slice063 +case0031_slice064 +case0031_slice065 +case0031_slice066 +case0031_slice067 +case0031_slice068 +case0031_slice069 +case0031_slice070 +case0031_slice071 +case0031_slice072 +case0031_slice073 +case0031_slice074 +case0031_slice075 +case0031_slice076 +case0031_slice077 +case0031_slice078 +case0031_slice079 +case0031_slice080 +case0031_slice081 +case0031_slice082 +case0031_slice083 +case0031_slice084 +case0031_slice085 +case0031_slice086 +case0031_slice087 +case0031_slice088 +case0031_slice089 +case0031_slice090 +case0031_slice091 +case0031_slice092 +case0007_slice000 +case0007_slice001 +case0007_slice002 +case0007_slice003 +case0007_slice004 +case0007_slice005 +case0007_slice006 +case0007_slice007 +case0007_slice008 +case0007_slice009 +case0007_slice010 +case0007_slice011 +case0007_slice012 +case0007_slice013 +case0007_slice014 +case0007_slice015 +case0007_slice016 +case0007_slice017 +case0007_slice018 +case0007_slice019 +case0007_slice020 +case0007_slice021 +case0007_slice022 +case0007_slice023 +case0007_slice024 +case0007_slice025 +case0007_slice026 +case0007_slice027 +case0007_slice028 +case0007_slice029 +case0007_slice030 +case0007_slice031 +case0007_slice032 +case0007_slice033 +case0007_slice034 +case0007_slice035 +case0007_slice036 +case0007_slice037 +case0007_slice038 +case0007_slice039 +case0007_slice040 +case0007_slice041 +case0007_slice042 +case0007_slice043 +case0007_slice044 +case0007_slice045 +case0007_slice046 +case0007_slice047 +case0007_slice048 +case0007_slice049 +case0007_slice050 +case0007_slice051 +case0007_slice052 +case0007_slice053 +case0007_slice054 +case0007_slice055 +case0007_slice056 +case0007_slice057 +case0007_slice058 +case0007_slice059 +case0007_slice060 +case0007_slice061 +case0007_slice062 +case0007_slice063 +case0007_slice064 +case0007_slice065 +case0007_slice066 +case0007_slice067 +case0007_slice068 +case0007_slice069 +case0007_slice070 +case0007_slice071 +case0007_slice072 +case0007_slice073 +case0007_slice074 +case0007_slice075 +case0007_slice076 +case0007_slice077 +case0007_slice078 +case0007_slice079 +case0007_slice080 +case0007_slice081 +case0007_slice082 +case0007_slice083 +case0007_slice084 +case0007_slice085 +case0007_slice086 +case0007_slice087 +case0007_slice088 +case0007_slice089 +case0007_slice090 +case0007_slice091 +case0007_slice092 +case0007_slice093 +case0007_slice094 +case0007_slice095 +case0007_slice096 +case0007_slice097 +case0007_slice098 +case0007_slice099 +case0007_slice100 +case0007_slice101 +case0007_slice102 +case0007_slice103 +case0007_slice104 +case0007_slice105 +case0007_slice106 +case0007_slice107 +case0007_slice108 +case0007_slice109 +case0007_slice110 +case0007_slice111 +case0007_slice112 +case0007_slice113 +case0007_slice114 +case0007_slice115 +case0007_slice116 +case0007_slice117 +case0007_slice118 +case0007_slice119 +case0007_slice120 +case0007_slice121 +case0007_slice122 +case0007_slice123 +case0007_slice124 +case0007_slice125 +case0007_slice126 +case0007_slice127 +case0007_slice128 +case0007_slice129 +case0007_slice130 +case0007_slice131 +case0007_slice132 +case0007_slice133 +case0007_slice134 +case0007_slice135 +case0007_slice136 +case0007_slice137 +case0007_slice138 +case0007_slice139 +case0007_slice140 +case0007_slice141 +case0007_slice142 +case0007_slice143 +case0007_slice144 
+case0007_slice145 +case0007_slice146 +case0007_slice147 +case0007_slice148 +case0007_slice149 +case0007_slice150 +case0007_slice151 +case0007_slice152 +case0007_slice153 +case0007_slice154 +case0007_slice155 +case0007_slice156 +case0007_slice157 +case0007_slice158 +case0007_slice159 +case0007_slice160 +case0007_slice161 +case0007_slice162 +case0009_slice000 +case0009_slice001 +case0009_slice002 +case0009_slice003 +case0009_slice004 +case0009_slice005 +case0009_slice006 +case0009_slice007 +case0009_slice008 +case0009_slice009 +case0009_slice010 +case0009_slice011 +case0009_slice012 +case0009_slice013 +case0009_slice014 +case0009_slice015 +case0009_slice016 +case0009_slice017 +case0009_slice018 +case0009_slice019 +case0009_slice020 +case0009_slice021 +case0009_slice022 +case0009_slice023 +case0009_slice024 +case0009_slice025 +case0009_slice026 +case0009_slice027 +case0009_slice028 +case0009_slice029 +case0009_slice030 +case0009_slice031 +case0009_slice032 +case0009_slice033 +case0009_slice034 +case0009_slice035 +case0009_slice036 +case0009_slice037 +case0009_slice038 +case0009_slice039 +case0009_slice040 +case0009_slice041 +case0009_slice042 +case0009_slice043 +case0009_slice044 +case0009_slice045 +case0009_slice046 +case0009_slice047 +case0009_slice048 +case0009_slice049 +case0009_slice050 +case0009_slice051 +case0009_slice052 +case0009_slice053 +case0009_slice054 +case0009_slice055 +case0009_slice056 +case0009_slice057 +case0009_slice058 +case0009_slice059 +case0009_slice060 +case0009_slice061 +case0009_slice062 +case0009_slice063 +case0009_slice064 +case0009_slice065 +case0009_slice066 +case0009_slice067 +case0009_slice068 +case0009_slice069 +case0009_slice070 +case0009_slice071 +case0009_slice072 +case0009_slice073 +case0009_slice074 +case0009_slice075 +case0009_slice076 +case0009_slice077 +case0009_slice078 +case0009_slice079 +case0009_slice080 +case0009_slice081 +case0009_slice082 +case0009_slice083 +case0009_slice084 +case0009_slice085 +case0009_slice086 +case0009_slice087 +case0009_slice088 +case0009_slice089 +case0009_slice090 +case0009_slice091 +case0009_slice092 +case0009_slice093 +case0009_slice094 +case0009_slice095 +case0009_slice096 +case0009_slice097 +case0009_slice098 +case0009_slice099 +case0009_slice100 +case0009_slice101 +case0009_slice102 +case0009_slice103 +case0009_slice104 +case0009_slice105 +case0009_slice106 +case0009_slice107 +case0009_slice108 +case0009_slice109 +case0009_slice110 +case0009_slice111 +case0009_slice112 +case0009_slice113 +case0009_slice114 +case0009_slice115 +case0009_slice116 +case0009_slice117 +case0009_slice118 +case0009_slice119 +case0009_slice120 +case0009_slice121 +case0009_slice122 +case0009_slice123 +case0009_slice124 +case0009_slice125 +case0009_slice126 +case0009_slice127 +case0009_slice128 +case0009_slice129 +case0009_slice130 +case0009_slice131 +case0009_slice132 +case0009_slice133 +case0009_slice134 +case0009_slice135 +case0009_slice136 +case0009_slice137 +case0009_slice138 +case0009_slice139 +case0009_slice140 +case0009_slice141 +case0009_slice142 +case0009_slice143 +case0009_slice144 +case0009_slice145 +case0009_slice146 +case0009_slice147 +case0009_slice148 +case0005_slice000 +case0005_slice001 +case0005_slice002 +case0005_slice003 +case0005_slice004 +case0005_slice005 +case0005_slice006 +case0005_slice007 +case0005_slice008 +case0005_slice009 +case0005_slice010 +case0005_slice011 +case0005_slice012 +case0005_slice013 +case0005_slice014 +case0005_slice015 +case0005_slice016 +case0005_slice017 +case0005_slice018 +case0005_slice019 
+case0005_slice020 +case0005_slice021 +case0005_slice022 +case0005_slice023 +case0005_slice024 +case0005_slice025 +case0005_slice026 +case0005_slice027 +case0005_slice028 +case0005_slice029 +case0005_slice030 +case0005_slice031 +case0005_slice032 +case0005_slice033 +case0005_slice034 +case0005_slice035 +case0005_slice036 +case0005_slice037 +case0005_slice038 +case0005_slice039 +case0005_slice040 +case0005_slice041 +case0005_slice042 +case0005_slice043 +case0005_slice044 +case0005_slice045 +case0005_slice046 +case0005_slice047 +case0005_slice048 +case0005_slice049 +case0005_slice050 +case0005_slice051 +case0005_slice052 +case0005_slice053 +case0005_slice054 +case0005_slice055 +case0005_slice056 +case0005_slice057 +case0005_slice058 +case0005_slice059 +case0005_slice060 +case0005_slice061 +case0005_slice062 +case0005_slice063 +case0005_slice064 +case0005_slice065 +case0005_slice066 +case0005_slice067 +case0005_slice068 +case0005_slice069 +case0005_slice070 +case0005_slice071 +case0005_slice072 +case0005_slice073 +case0005_slice074 +case0005_slice075 +case0005_slice076 +case0005_slice077 +case0005_slice078 +case0005_slice079 +case0005_slice080 +case0005_slice081 +case0005_slice082 +case0005_slice083 +case0005_slice084 +case0005_slice085 +case0005_slice086 +case0005_slice087 +case0005_slice088 +case0005_slice089 +case0005_slice090 +case0005_slice091 +case0005_slice092 +case0005_slice093 +case0005_slice094 +case0005_slice095 +case0005_slice096 +case0005_slice097 +case0005_slice098 +case0005_slice099 +case0005_slice100 +case0005_slice101 +case0005_slice102 +case0005_slice103 +case0005_slice104 +case0005_slice105 +case0005_slice106 +case0005_slice107 +case0005_slice108 +case0005_slice109 +case0005_slice110 +case0005_slice111 +case0005_slice112 +case0005_slice113 +case0005_slice114 +case0005_slice115 +case0005_slice116 +case0026_slice000 +case0026_slice001 +case0026_slice002 +case0026_slice003 +case0026_slice004 +case0026_slice005 +case0026_slice006 +case0026_slice007 +case0026_slice008 +case0026_slice009 +case0026_slice010 +case0026_slice011 +case0026_slice012 +case0026_slice013 +case0026_slice014 +case0026_slice015 +case0026_slice016 +case0026_slice017 +case0026_slice018 +case0026_slice019 +case0026_slice020 +case0026_slice021 +case0026_slice022 +case0026_slice023 +case0026_slice024 +case0026_slice025 +case0026_slice026 +case0026_slice027 +case0026_slice028 +case0026_slice029 +case0026_slice030 +case0026_slice031 +case0026_slice032 +case0026_slice033 +case0026_slice034 +case0026_slice035 +case0026_slice036 +case0026_slice037 +case0026_slice038 +case0026_slice039 +case0026_slice040 +case0026_slice041 +case0026_slice042 +case0026_slice043 +case0026_slice044 +case0026_slice045 +case0026_slice046 +case0026_slice047 +case0026_slice048 +case0026_slice049 +case0026_slice050 +case0026_slice051 +case0026_slice052 +case0026_slice053 +case0026_slice054 +case0026_slice055 +case0026_slice056 +case0026_slice057 +case0026_slice058 +case0026_slice059 +case0026_slice060 +case0026_slice061 +case0026_slice062 +case0026_slice063 +case0026_slice064 +case0026_slice065 +case0026_slice066 +case0026_slice067 +case0026_slice068 +case0026_slice069 +case0026_slice070 +case0026_slice071 +case0026_slice072 +case0026_slice073 +case0026_slice074 +case0026_slice075 +case0026_slice076 +case0026_slice077 +case0026_slice078 +case0026_slice079 +case0026_slice080 +case0026_slice081 +case0026_slice082 +case0026_slice083 +case0026_slice084 +case0026_slice085 +case0026_slice086 +case0026_slice087 +case0026_slice088 +case0026_slice089 
+case0026_slice090 +case0026_slice091 +case0026_slice092 +case0026_slice093 +case0026_slice094 +case0026_slice095 +case0026_slice096 +case0026_slice097 +case0026_slice098 +case0026_slice099 +case0026_slice100 +case0026_slice101 +case0026_slice102 +case0026_slice103 +case0026_slice104 +case0026_slice105 +case0026_slice106 +case0026_slice107 +case0026_slice108 +case0026_slice109 +case0026_slice110 +case0026_slice111 +case0026_slice112 +case0026_slice113 +case0026_slice114 +case0026_slice115 +case0026_slice116 +case0026_slice117 +case0026_slice118 +case0026_slice119 +case0026_slice120 +case0026_slice121 +case0026_slice122 +case0026_slice123 +case0026_slice124 +case0026_slice125 +case0026_slice126 +case0026_slice127 +case0026_slice128 +case0026_slice129 +case0026_slice130 +case0039_slice000 +case0039_slice001 +case0039_slice002 +case0039_slice003 +case0039_slice004 +case0039_slice005 +case0039_slice006 +case0039_slice007 +case0039_slice008 +case0039_slice009 +case0039_slice010 +case0039_slice011 +case0039_slice012 +case0039_slice013 +case0039_slice014 +case0039_slice015 +case0039_slice016 +case0039_slice017 +case0039_slice018 +case0039_slice019 +case0039_slice020 +case0039_slice021 +case0039_slice022 +case0039_slice023 +case0039_slice024 +case0039_slice025 +case0039_slice026 +case0039_slice027 +case0039_slice028 +case0039_slice029 +case0039_slice030 +case0039_slice031 +case0039_slice032 +case0039_slice033 +case0039_slice034 +case0039_slice035 +case0039_slice036 +case0039_slice037 +case0039_slice038 +case0039_slice039 +case0039_slice040 +case0039_slice041 +case0039_slice042 +case0039_slice043 +case0039_slice044 +case0039_slice045 +case0039_slice046 +case0039_slice047 +case0039_slice048 +case0039_slice049 +case0039_slice050 +case0039_slice051 +case0039_slice052 +case0039_slice053 +case0039_slice054 +case0039_slice055 +case0039_slice056 +case0039_slice057 +case0039_slice058 +case0039_slice059 +case0039_slice060 +case0039_slice061 +case0039_slice062 +case0039_slice063 +case0039_slice064 +case0039_slice065 +case0039_slice066 +case0039_slice067 +case0039_slice068 +case0039_slice069 +case0039_slice070 +case0039_slice071 +case0039_slice072 +case0039_slice073 +case0039_slice074 +case0039_slice075 +case0039_slice076 +case0039_slice077 +case0039_slice078 +case0039_slice079 +case0039_slice080 +case0039_slice081 +case0039_slice082 +case0039_slice083 +case0039_slice084 +case0039_slice085 +case0039_slice086 +case0039_slice087 +case0039_slice088 +case0039_slice089 +case0024_slice000 +case0024_slice001 +case0024_slice002 +case0024_slice003 +case0024_slice004 +case0024_slice005 +case0024_slice006 +case0024_slice007 +case0024_slice008 +case0024_slice009 +case0024_slice010 +case0024_slice011 +case0024_slice012 +case0024_slice013 +case0024_slice014 +case0024_slice015 +case0024_slice016 +case0024_slice017 +case0024_slice018 +case0024_slice019 +case0024_slice020 +case0024_slice021 +case0024_slice022 +case0024_slice023 +case0024_slice024 +case0024_slice025 +case0024_slice026 +case0024_slice027 +case0024_slice028 +case0024_slice029 +case0024_slice030 +case0024_slice031 +case0024_slice032 +case0024_slice033 +case0024_slice034 +case0024_slice035 +case0024_slice036 +case0024_slice037 +case0024_slice038 +case0024_slice039 +case0024_slice040 +case0024_slice041 +case0024_slice042 +case0024_slice043 +case0024_slice044 +case0024_slice045 +case0024_slice046 +case0024_slice047 +case0024_slice048 +case0024_slice049 +case0024_slice050 +case0024_slice051 +case0024_slice052 +case0024_slice053 +case0024_slice054 +case0024_slice055 
+case0024_slice056 +case0024_slice057 +case0024_slice058 +case0024_slice059 +case0024_slice060 +case0024_slice061 +case0024_slice062 +case0024_slice063 +case0024_slice064 +case0024_slice065 +case0024_slice066 +case0024_slice067 +case0024_slice068 +case0024_slice069 +case0024_slice070 +case0024_slice071 +case0024_slice072 +case0024_slice073 +case0024_slice074 +case0024_slice075 +case0024_slice076 +case0024_slice077 +case0024_slice078 +case0024_slice079 +case0024_slice080 +case0024_slice081 +case0024_slice082 +case0024_slice083 +case0024_slice084 +case0024_slice085 +case0024_slice086 +case0024_slice087 +case0024_slice088 +case0024_slice089 +case0024_slice090 +case0024_slice091 +case0024_slice092 +case0024_slice093 +case0024_slice094 +case0024_slice095 +case0024_slice096 +case0024_slice097 +case0024_slice098 +case0024_slice099 +case0024_slice100 +case0024_slice101 +case0024_slice102 +case0024_slice103 +case0024_slice104 +case0024_slice105 +case0024_slice106 +case0024_slice107 +case0024_slice108 +case0024_slice109 +case0024_slice110 +case0024_slice111 +case0024_slice112 +case0024_slice113 +case0024_slice114 +case0024_slice115 +case0024_slice116 +case0024_slice117 +case0024_slice118 +case0024_slice119 +case0024_slice120 +case0024_slice121 +case0024_slice122 +case0024_slice123 +case0034_slice000 +case0034_slice001 +case0034_slice002 +case0034_slice003 +case0034_slice004 +case0034_slice005 +case0034_slice006 +case0034_slice007 +case0034_slice008 +case0034_slice009 +case0034_slice010 +case0034_slice011 +case0034_slice012 +case0034_slice013 +case0034_slice014 +case0034_slice015 +case0034_slice016 +case0034_slice017 +case0034_slice018 +case0034_slice019 +case0034_slice020 +case0034_slice021 +case0034_slice022 +case0034_slice023 +case0034_slice024 +case0034_slice025 +case0034_slice026 +case0034_slice027 +case0034_slice028 +case0034_slice029 +case0034_slice030 +case0034_slice031 +case0034_slice032 +case0034_slice033 +case0034_slice034 +case0034_slice035 +case0034_slice036 +case0034_slice037 +case0034_slice038 +case0034_slice039 +case0034_slice040 +case0034_slice041 +case0034_slice042 +case0034_slice043 +case0034_slice044 +case0034_slice045 +case0034_slice046 +case0034_slice047 +case0034_slice048 +case0034_slice049 +case0034_slice050 +case0034_slice051 +case0034_slice052 +case0034_slice053 +case0034_slice054 +case0034_slice055 +case0034_slice056 +case0034_slice057 +case0034_slice058 +case0034_slice059 +case0034_slice060 +case0034_slice061 +case0034_slice062 +case0034_slice063 +case0034_slice064 +case0034_slice065 +case0034_slice066 +case0034_slice067 +case0034_slice068 +case0034_slice069 +case0034_slice070 +case0034_slice071 +case0034_slice072 +case0034_slice073 +case0034_slice074 +case0034_slice075 +case0034_slice076 +case0034_slice077 +case0034_slice078 +case0034_slice079 +case0034_slice080 +case0034_slice081 +case0034_slice082 +case0034_slice083 +case0034_slice084 +case0034_slice085 +case0034_slice086 +case0034_slice087 +case0034_slice088 +case0034_slice089 +case0034_slice090 +case0034_slice091 +case0034_slice092 +case0034_slice093 +case0034_slice094 +case0034_slice095 +case0034_slice096 +case0034_slice097 +case0033_slice000 +case0033_slice001 +case0033_slice002 +case0033_slice003 +case0033_slice004 +case0033_slice005 +case0033_slice006 +case0033_slice007 +case0033_slice008 +case0033_slice009 +case0033_slice010 +case0033_slice011 +case0033_slice012 +case0033_slice013 +case0033_slice014 +case0033_slice015 +case0033_slice016 +case0033_slice017 +case0033_slice018 +case0033_slice019 +case0033_slice020 
+case0033_slice021 +case0033_slice022 +case0033_slice023 +case0033_slice024 +case0033_slice025 +case0033_slice026 +case0033_slice027 +case0033_slice028 +case0033_slice029 +case0033_slice030 +case0033_slice031 +case0033_slice032 +case0033_slice033 +case0033_slice034 +case0033_slice035 +case0033_slice036 +case0033_slice037 +case0033_slice038 +case0033_slice039 +case0033_slice040 +case0033_slice041 +case0033_slice042 +case0033_slice043 +case0033_slice044 +case0033_slice045 +case0033_slice046 +case0033_slice047 +case0033_slice048 +case0033_slice049 +case0033_slice050 +case0033_slice051 +case0033_slice052 +case0033_slice053 +case0033_slice054 +case0033_slice055 +case0033_slice056 +case0033_slice057 +case0033_slice058 +case0033_slice059 +case0033_slice060 +case0033_slice061 +case0033_slice062 +case0033_slice063 +case0033_slice064 +case0033_slice065 +case0033_slice066 +case0033_slice067 +case0033_slice068 +case0033_slice069 +case0033_slice070 +case0033_slice071 +case0033_slice072 +case0033_slice073 +case0033_slice074 +case0033_slice075 +case0033_slice076 +case0033_slice077 +case0033_slice078 +case0033_slice079 +case0033_slice080 +case0033_slice081 +case0033_slice082 +case0033_slice083 +case0033_slice084 +case0033_slice085 +case0033_slice086 +case0033_slice087 +case0033_slice088 +case0033_slice089 +case0033_slice090 +case0033_slice091 +case0033_slice092 +case0033_slice093 +case0033_slice094 +case0033_slice095 +case0033_slice096 +case0033_slice097 +case0033_slice098 +case0033_slice099 +case0033_slice100 +case0033_slice101 +case0033_slice102 +case0033_slice103 +case0030_slice000 +case0030_slice001 +case0030_slice002 +case0030_slice003 +case0030_slice004 +case0030_slice005 +case0030_slice006 +case0030_slice007 +case0030_slice008 +case0030_slice009 +case0030_slice010 +case0030_slice011 +case0030_slice012 +case0030_slice013 +case0030_slice014 +case0030_slice015 +case0030_slice016 +case0030_slice017 +case0030_slice018 +case0030_slice019 +case0030_slice020 +case0030_slice021 +case0030_slice022 +case0030_slice023 +case0030_slice024 +case0030_slice025 +case0030_slice026 +case0030_slice027 +case0030_slice028 +case0030_slice029 +case0030_slice030 +case0030_slice031 +case0030_slice032 +case0030_slice033 +case0030_slice034 +case0030_slice035 +case0030_slice036 +case0030_slice037 +case0030_slice038 +case0030_slice039 +case0030_slice040 +case0030_slice041 +case0030_slice042 +case0030_slice043 +case0030_slice044 +case0030_slice045 +case0030_slice046 +case0030_slice047 +case0030_slice048 +case0030_slice049 +case0030_slice050 +case0030_slice051 +case0030_slice052 +case0030_slice053 +case0030_slice054 +case0030_slice055 +case0030_slice056 +case0030_slice057 +case0030_slice058 +case0030_slice059 +case0030_slice060 +case0030_slice061 +case0030_slice062 +case0030_slice063 +case0030_slice064 +case0030_slice065 +case0030_slice066 +case0030_slice067 +case0030_slice068 +case0030_slice069 +case0030_slice070 +case0030_slice071 +case0030_slice072 +case0030_slice073 +case0030_slice074 +case0030_slice075 +case0030_slice076 +case0030_slice077 +case0030_slice078 +case0030_slice079 +case0030_slice080 +case0030_slice081 +case0030_slice082 +case0030_slice083 +case0030_slice084 +case0030_slice085 +case0030_slice086 +case0030_slice087 +case0030_slice088 +case0030_slice089 +case0030_slice090 +case0030_slice091 +case0030_slice092 +case0030_slice093 +case0030_slice094 +case0030_slice095 +case0030_slice096 +case0030_slice097 +case0030_slice098 +case0030_slice099 +case0030_slice100 +case0030_slice101 +case0030_slice102 +case0030_slice103 
+case0030_slice104 +case0030_slice105 +case0030_slice106 +case0030_slice107 +case0030_slice108 +case0030_slice109 +case0030_slice110 +case0030_slice111 +case0030_slice112 +case0030_slice113 +case0030_slice114 +case0030_slice115 +case0030_slice116 +case0030_slice117 +case0030_slice118 +case0030_slice119 +case0030_slice120 +case0030_slice121 +case0030_slice122 +case0030_slice123 +case0030_slice124 +case0030_slice125 +case0030_slice126 +case0030_slice127 +case0030_slice128 +case0030_slice129 +case0030_slice130 +case0030_slice131 +case0030_slice132 +case0030_slice133 +case0030_slice134 +case0030_slice135 +case0030_slice136 +case0030_slice137 +case0030_slice138 +case0030_slice139 +case0030_slice140 +case0030_slice141 +case0030_slice142 +case0030_slice143 +case0030_slice144 +case0030_slice145 +case0030_slice146 +case0030_slice147 +case0030_slice148 +case0030_slice149 +case0030_slice150 +case0030_slice151 +case0030_slice152 +case0023_slice000 +case0023_slice001 +case0023_slice002 +case0023_slice003 +case0023_slice004 +case0023_slice005 +case0023_slice006 +case0023_slice007 +case0023_slice008 +case0023_slice009 +case0023_slice010 +case0023_slice011 +case0023_slice012 +case0023_slice013 +case0023_slice014 +case0023_slice015 +case0023_slice016 +case0023_slice017 +case0023_slice018 +case0023_slice019 +case0023_slice020 +case0023_slice021 +case0023_slice022 +case0023_slice023 +case0023_slice024 +case0023_slice025 +case0023_slice026 +case0023_slice027 +case0023_slice028 +case0023_slice029 +case0023_slice030 +case0023_slice031 +case0023_slice032 +case0023_slice033 +case0023_slice034 +case0023_slice035 +case0023_slice036 +case0023_slice037 +case0023_slice038 +case0023_slice039 +case0023_slice040 +case0023_slice041 +case0023_slice042 +case0023_slice043 +case0023_slice044 +case0023_slice045 +case0023_slice046 +case0023_slice047 +case0023_slice048 +case0023_slice049 +case0023_slice050 +case0023_slice051 +case0023_slice052 +case0023_slice053 +case0023_slice054 +case0023_slice055 +case0023_slice056 +case0023_slice057 +case0023_slice058 +case0023_slice059 +case0023_slice060 +case0023_slice061 +case0023_slice062 +case0023_slice063 +case0023_slice064 +case0023_slice065 +case0023_slice066 +case0023_slice067 +case0023_slice068 +case0023_slice069 +case0023_slice070 +case0023_slice071 +case0023_slice072 +case0023_slice073 +case0023_slice074 +case0023_slice075 +case0023_slice076 +case0023_slice077 +case0023_slice078 +case0023_slice079 +case0023_slice080 +case0023_slice081 +case0023_slice082 +case0023_slice083 +case0023_slice084 +case0023_slice085 +case0023_slice086 +case0023_slice087 +case0023_slice088 +case0023_slice089 +case0023_slice090 +case0023_slice091 +case0023_slice092 +case0023_slice093 +case0023_slice094 +case0023_slice095 +case0040_slice000 +case0040_slice001 +case0040_slice002 +case0040_slice003 +case0040_slice004 +case0040_slice005 +case0040_slice006 +case0040_slice007 +case0040_slice008 +case0040_slice009 +case0040_slice010 +case0040_slice011 +case0040_slice012 +case0040_slice013 +case0040_slice014 +case0040_slice015 +case0040_slice016 +case0040_slice017 +case0040_slice018 +case0040_slice019 +case0040_slice020 +case0040_slice021 +case0040_slice022 +case0040_slice023 +case0040_slice024 +case0040_slice025 +case0040_slice026 +case0040_slice027 +case0040_slice028 +case0040_slice029 +case0040_slice030 +case0040_slice031 +case0040_slice032 +case0040_slice033 +case0040_slice034 +case0040_slice035 +case0040_slice036 +case0040_slice037 +case0040_slice038 +case0040_slice039 +case0040_slice040 +case0040_slice041 
+case0040_slice042 +case0040_slice043 +case0040_slice044 +case0040_slice045 +case0040_slice046 +case0040_slice047 +case0040_slice048 +case0040_slice049 +case0040_slice050 +case0040_slice051 +case0040_slice052 +case0040_slice053 +case0040_slice054 +case0040_slice055 +case0040_slice056 +case0040_slice057 +case0040_slice058 +case0040_slice059 +case0040_slice060 +case0040_slice061 +case0040_slice062 +case0040_slice063 +case0040_slice064 +case0040_slice065 +case0040_slice066 +case0040_slice067 +case0040_slice068 +case0040_slice069 +case0040_slice070 +case0040_slice071 +case0040_slice072 +case0040_slice073 +case0040_slice074 +case0040_slice075 +case0040_slice076 +case0040_slice077 +case0040_slice078 +case0040_slice079 +case0040_slice080 +case0040_slice081 +case0040_slice082 +case0040_slice083 +case0040_slice084 +case0040_slice085 +case0040_slice086 +case0040_slice087 +case0040_slice088 +case0040_slice089 +case0040_slice090 +case0040_slice091 +case0040_slice092 +case0040_slice093 +case0040_slice094 +case0040_slice095 +case0040_slice096 +case0040_slice097 +case0040_slice098 +case0040_slice099 +case0040_slice100 +case0040_slice101 +case0040_slice102 +case0040_slice103 +case0040_slice104 +case0040_slice105 +case0040_slice106 +case0040_slice107 +case0040_slice108 +case0040_slice109 +case0040_slice110 +case0040_slice111 +case0040_slice112 +case0040_slice113 +case0040_slice114 +case0040_slice115 +case0040_slice116 +case0040_slice117 +case0040_slice118 +case0040_slice119 +case0040_slice120 +case0040_slice121 +case0040_slice122 +case0040_slice123 +case0040_slice124 +case0040_slice125 +case0040_slice126 +case0040_slice127 +case0040_slice128 +case0040_slice129 +case0040_slice130 +case0040_slice131 +case0040_slice132 +case0040_slice133 +case0040_slice134 +case0040_slice135 +case0040_slice136 +case0040_slice137 +case0040_slice138 +case0040_slice139 +case0040_slice140 +case0040_slice141 +case0040_slice142 +case0040_slice143 +case0040_slice144 +case0040_slice145 +case0040_slice146 +case0040_slice147 +case0040_slice148 +case0040_slice149 +case0040_slice150 +case0040_slice151 +case0040_slice152 +case0040_slice153 +case0040_slice154 +case0040_slice155 +case0040_slice156 +case0040_slice157 +case0040_slice158 +case0040_slice159 +case0040_slice160 +case0040_slice161 +case0040_slice162 +case0040_slice163 +case0040_slice164 +case0040_slice165 +case0040_slice166 +case0040_slice167 +case0040_slice168 +case0040_slice169 +case0040_slice170 +case0040_slice171 +case0040_slice172 +case0040_slice173 +case0040_slice174 +case0040_slice175 +case0040_slice176 +case0040_slice177 +case0040_slice178 +case0040_slice179 +case0040_slice180 +case0040_slice181 +case0040_slice182 +case0040_slice183 +case0040_slice184 +case0040_slice185 +case0040_slice186 +case0040_slice187 +case0040_slice188 +case0040_slice189 +case0040_slice190 +case0040_slice191 +case0040_slice192 +case0040_slice193 +case0040_slice194 +case0010_slice000 +case0010_slice001 +case0010_slice002 +case0010_slice003 +case0010_slice004 +case0010_slice005 +case0010_slice006 +case0010_slice007 +case0010_slice008 +case0010_slice009 +case0010_slice010 +case0010_slice011 +case0010_slice012 +case0010_slice013 +case0010_slice014 +case0010_slice015 +case0010_slice016 +case0010_slice017 +case0010_slice018 +case0010_slice019 +case0010_slice020 +case0010_slice021 +case0010_slice022 +case0010_slice023 +case0010_slice024 +case0010_slice025 +case0010_slice026 +case0010_slice027 +case0010_slice028 +case0010_slice029 +case0010_slice030 +case0010_slice031 +case0010_slice032 +case0010_slice033 
+case0010_slice034 +case0010_slice035 +case0010_slice036 +case0010_slice037 +case0010_slice038 +case0010_slice039 +case0010_slice040 +case0010_slice041 +case0010_slice042 +case0010_slice043 +case0010_slice044 +case0010_slice045 +case0010_slice046 +case0010_slice047 +case0010_slice048 +case0010_slice049 +case0010_slice050 +case0010_slice051 +case0010_slice052 +case0010_slice053 +case0010_slice054 +case0010_slice055 +case0010_slice056 +case0010_slice057 +case0010_slice058 +case0010_slice059 +case0010_slice060 +case0010_slice061 +case0010_slice062 +case0010_slice063 +case0010_slice064 +case0010_slice065 +case0010_slice066 +case0010_slice067 +case0010_slice068 +case0010_slice069 +case0010_slice070 +case0010_slice071 +case0010_slice072 +case0010_slice073 +case0010_slice074 +case0010_slice075 +case0010_slice076 +case0010_slice077 +case0010_slice078 +case0010_slice079 +case0010_slice080 +case0010_slice081 +case0010_slice082 +case0010_slice083 +case0010_slice084 +case0010_slice085 +case0010_slice086 +case0010_slice087 +case0010_slice088 +case0010_slice089 +case0010_slice090 +case0010_slice091 +case0010_slice092 +case0010_slice093 +case0010_slice094 +case0010_slice095 +case0010_slice096 +case0010_slice097 +case0010_slice098 +case0010_slice099 +case0010_slice100 +case0010_slice101 +case0010_slice102 +case0010_slice103 +case0010_slice104 +case0010_slice105 +case0010_slice106 +case0010_slice107 +case0010_slice108 +case0010_slice109 +case0010_slice110 +case0010_slice111 +case0010_slice112 +case0010_slice113 +case0010_slice114 +case0010_slice115 +case0010_slice116 +case0010_slice117 +case0010_slice118 +case0010_slice119 +case0010_slice120 +case0010_slice121 +case0010_slice122 +case0010_slice123 +case0010_slice124 +case0010_slice125 +case0010_slice126 +case0010_slice127 +case0010_slice128 +case0010_slice129 +case0010_slice130 +case0010_slice131 +case0010_slice132 +case0010_slice133 +case0010_slice134 +case0010_slice135 +case0010_slice136 +case0010_slice137 +case0010_slice138 +case0010_slice139 +case0010_slice140 +case0010_slice141 +case0010_slice142 +case0010_slice143 +case0010_slice144 +case0010_slice145 +case0010_slice146 +case0010_slice147 +case0021_slice000 +case0021_slice001 +case0021_slice002 +case0021_slice003 +case0021_slice004 +case0021_slice005 +case0021_slice006 +case0021_slice007 +case0021_slice008 +case0021_slice009 +case0021_slice010 +case0021_slice011 +case0021_slice012 +case0021_slice013 +case0021_slice014 +case0021_slice015 +case0021_slice016 +case0021_slice017 +case0021_slice018 +case0021_slice019 +case0021_slice020 +case0021_slice021 +case0021_slice022 +case0021_slice023 +case0021_slice024 +case0021_slice025 +case0021_slice026 +case0021_slice027 +case0021_slice028 +case0021_slice029 +case0021_slice030 +case0021_slice031 +case0021_slice032 +case0021_slice033 +case0021_slice034 +case0021_slice035 +case0021_slice036 +case0021_slice037 +case0021_slice038 +case0021_slice039 +case0021_slice040 +case0021_slice041 +case0021_slice042 +case0021_slice043 +case0021_slice044 +case0021_slice045 +case0021_slice046 +case0021_slice047 +case0021_slice048 +case0021_slice049 +case0021_slice050 +case0021_slice051 +case0021_slice052 +case0021_slice053 +case0021_slice054 +case0021_slice055 +case0021_slice056 +case0021_slice057 +case0021_slice058 +case0021_slice059 +case0021_slice060 +case0021_slice061 +case0021_slice062 +case0021_slice063 +case0021_slice064 +case0021_slice065 +case0021_slice066 +case0021_slice067 +case0021_slice068 +case0021_slice069 +case0021_slice070 +case0021_slice071 +case0021_slice072 
+case0021_slice073 +case0021_slice074 +case0021_slice075 +case0021_slice076 +case0021_slice077 +case0021_slice078 +case0021_slice079 +case0021_slice080 +case0021_slice081 +case0021_slice082 +case0021_slice083 +case0021_slice084 +case0021_slice085 +case0021_slice086 +case0021_slice087 +case0021_slice088 +case0021_slice089 +case0021_slice090 +case0021_slice091 +case0021_slice092 +case0021_slice093 +case0021_slice094 +case0021_slice095 +case0021_slice096 +case0021_slice097 +case0021_slice098 +case0021_slice099 +case0021_slice100 +case0021_slice101 +case0021_slice102 +case0021_slice103 +case0021_slice104 +case0021_slice105 +case0021_slice106 +case0021_slice107 +case0021_slice108 +case0021_slice109 +case0021_slice110 +case0021_slice111 +case0021_slice112 +case0021_slice113 +case0021_slice114 +case0021_slice115 +case0021_slice116 +case0021_slice117 +case0021_slice118 +case0021_slice119 +case0021_slice120 +case0021_slice121 +case0021_slice122 +case0021_slice123 +case0021_slice124 +case0021_slice125 +case0021_slice126 +case0021_slice127 +case0021_slice128 +case0021_slice129 +case0021_slice130 +case0021_slice131 +case0021_slice132 +case0021_slice133 +case0021_slice134 +case0021_slice135 +case0021_slice136 +case0021_slice137 +case0021_slice138 +case0021_slice139 +case0021_slice140 +case0021_slice141 +case0021_slice142 +case0006_slice000 +case0006_slice001 +case0006_slice002 +case0006_slice003 +case0006_slice004 +case0006_slice005 +case0006_slice006 +case0006_slice007 +case0006_slice008 +case0006_slice009 +case0006_slice010 +case0006_slice011 +case0006_slice012 +case0006_slice013 +case0006_slice014 +case0006_slice015 +case0006_slice016 +case0006_slice017 +case0006_slice018 +case0006_slice019 +case0006_slice020 +case0006_slice021 +case0006_slice022 +case0006_slice023 +case0006_slice024 +case0006_slice025 +case0006_slice026 +case0006_slice027 +case0006_slice028 +case0006_slice029 +case0006_slice030 +case0006_slice031 +case0006_slice032 +case0006_slice033 +case0006_slice034 +case0006_slice035 +case0006_slice036 +case0006_slice037 +case0006_slice038 +case0006_slice039 +case0006_slice040 +case0006_slice041 +case0006_slice042 +case0006_slice043 +case0006_slice044 +case0006_slice045 +case0006_slice046 +case0006_slice047 +case0006_slice048 +case0006_slice049 +case0006_slice050 +case0006_slice051 +case0006_slice052 +case0006_slice053 +case0006_slice054 +case0006_slice055 +case0006_slice056 +case0006_slice057 +case0006_slice058 +case0006_slice059 +case0006_slice060 +case0006_slice061 +case0006_slice062 +case0006_slice063 +case0006_slice064 +case0006_slice065 +case0006_slice066 +case0006_slice067 +case0006_slice068 +case0006_slice069 +case0006_slice070 +case0006_slice071 +case0006_slice072 +case0006_slice073 +case0006_slice074 +case0006_slice075 +case0006_slice076 +case0006_slice077 +case0006_slice078 +case0006_slice079 +case0006_slice080 +case0006_slice081 +case0006_slice082 +case0006_slice083 +case0006_slice084 +case0006_slice085 +case0006_slice086 +case0006_slice087 +case0006_slice088 +case0006_slice089 +case0006_slice090 +case0006_slice091 +case0006_slice092 +case0006_slice093 +case0006_slice094 +case0006_slice095 +case0006_slice096 +case0006_slice097 +case0006_slice098 +case0006_slice099 +case0006_slice100 +case0006_slice101 +case0006_slice102 +case0006_slice103 +case0006_slice104 +case0006_slice105 +case0006_slice106 +case0006_slice107 +case0006_slice108 +case0006_slice109 +case0006_slice110 +case0006_slice111 +case0006_slice112 +case0006_slice113 +case0006_slice114 +case0006_slice115 +case0006_slice116 
+case0006_slice117 +case0006_slice118 +case0006_slice119 +case0006_slice120 +case0006_slice121 +case0006_slice122 +case0006_slice123 +case0006_slice124 +case0006_slice125 +case0006_slice126 +case0006_slice127 +case0006_slice128 +case0006_slice129 +case0006_slice130 +case0027_slice000 +case0027_slice001 +case0027_slice002 +case0027_slice003 +case0027_slice004 +case0027_slice005 +case0027_slice006 +case0027_slice007 +case0027_slice008 +case0027_slice009 +case0027_slice010 +case0027_slice011 +case0027_slice012 +case0027_slice013 +case0027_slice014 +case0027_slice015 +case0027_slice016 +case0027_slice017 +case0027_slice018 +case0027_slice019 +case0027_slice020 +case0027_slice021 +case0027_slice022 +case0027_slice023 +case0027_slice024 +case0027_slice025 +case0027_slice026 +case0027_slice027 +case0027_slice028 +case0027_slice029 +case0027_slice030 +case0027_slice031 +case0027_slice032 +case0027_slice033 +case0027_slice034 +case0027_slice035 +case0027_slice036 +case0027_slice037 +case0027_slice038 +case0027_slice039 +case0027_slice040 +case0027_slice041 +case0027_slice042 +case0027_slice043 +case0027_slice044 +case0027_slice045 +case0027_slice046 +case0027_slice047 +case0027_slice048 +case0027_slice049 +case0027_slice050 +case0027_slice051 +case0027_slice052 +case0027_slice053 +case0027_slice054 +case0027_slice055 +case0027_slice056 +case0027_slice057 +case0027_slice058 +case0027_slice059 +case0027_slice060 +case0027_slice061 +case0027_slice062 +case0027_slice063 +case0027_slice064 +case0027_slice065 +case0027_slice066 +case0027_slice067 +case0027_slice068 +case0027_slice069 +case0027_slice070 +case0027_slice071 +case0027_slice072 +case0027_slice073 +case0027_slice074 +case0027_slice075 +case0027_slice076 +case0027_slice077 +case0027_slice078 +case0027_slice079 +case0027_slice080 +case0027_slice081 +case0027_slice082 +case0027_slice083 +case0027_slice084 +case0027_slice085 +case0027_slice086 +case0027_slice087 +case0028_slice000 +case0028_slice001 +case0028_slice002 +case0028_slice003 +case0028_slice004 +case0028_slice005 +case0028_slice006 +case0028_slice007 +case0028_slice008 +case0028_slice009 +case0028_slice010 +case0028_slice011 +case0028_slice012 +case0028_slice013 +case0028_slice014 +case0028_slice015 +case0028_slice016 +case0028_slice017 +case0028_slice018 +case0028_slice019 +case0028_slice020 +case0028_slice021 +case0028_slice022 +case0028_slice023 +case0028_slice024 +case0028_slice025 +case0028_slice026 +case0028_slice027 +case0028_slice028 +case0028_slice029 +case0028_slice030 +case0028_slice031 +case0028_slice032 +case0028_slice033 +case0028_slice034 +case0028_slice035 +case0028_slice036 +case0028_slice037 +case0028_slice038 +case0028_slice039 +case0028_slice040 +case0028_slice041 +case0028_slice042 +case0028_slice043 +case0028_slice044 +case0028_slice045 +case0028_slice046 +case0028_slice047 +case0028_slice048 +case0028_slice049 +case0028_slice050 +case0028_slice051 +case0028_slice052 +case0028_slice053 +case0028_slice054 +case0028_slice055 +case0028_slice056 +case0028_slice057 +case0028_slice058 +case0028_slice059 +case0028_slice060 +case0028_slice061 +case0028_slice062 +case0028_slice063 +case0028_slice064 +case0028_slice065 +case0028_slice066 +case0028_slice067 +case0028_slice068 +case0028_slice069 +case0028_slice070 +case0028_slice071 +case0028_slice072 +case0028_slice073 +case0028_slice074 +case0028_slice075 +case0028_slice076 +case0028_slice077 +case0028_slice078 +case0028_slice079 +case0028_slice080 +case0028_slice081 +case0028_slice082 +case0028_slice083 +case0028_slice084 
+case0028_slice085 +case0028_slice086 +case0028_slice087 +case0028_slice088 +case0037_slice000 +case0037_slice001 +case0037_slice002 +case0037_slice003 +case0037_slice004 +case0037_slice005 +case0037_slice006 +case0037_slice007 +case0037_slice008 +case0037_slice009 +case0037_slice010 +case0037_slice011 +case0037_slice012 +case0037_slice013 +case0037_slice014 +case0037_slice015 +case0037_slice016 +case0037_slice017 +case0037_slice018 +case0037_slice019 +case0037_slice020 +case0037_slice021 +case0037_slice022 +case0037_slice023 +case0037_slice024 +case0037_slice025 +case0037_slice026 +case0037_slice027 +case0037_slice028 +case0037_slice029 +case0037_slice030 +case0037_slice031 +case0037_slice032 +case0037_slice033 +case0037_slice034 +case0037_slice035 +case0037_slice036 +case0037_slice037 +case0037_slice038 +case0037_slice039 +case0037_slice040 +case0037_slice041 +case0037_slice042 +case0037_slice043 +case0037_slice044 +case0037_slice045 +case0037_slice046 +case0037_slice047 +case0037_slice048 +case0037_slice049 +case0037_slice050 +case0037_slice051 +case0037_slice052 +case0037_slice053 +case0037_slice054 +case0037_slice055 +case0037_slice056 +case0037_slice057 +case0037_slice058 +case0037_slice059 +case0037_slice060 +case0037_slice061 +case0037_slice062 +case0037_slice063 +case0037_slice064 +case0037_slice065 +case0037_slice066 +case0037_slice067 +case0037_slice068 +case0037_slice069 +case0037_slice070 +case0037_slice071 +case0037_slice072 +case0037_slice073 +case0037_slice074 +case0037_slice075 +case0037_slice076 +case0037_slice077 +case0037_slice078 +case0037_slice079 +case0037_slice080 +case0037_slice081 +case0037_slice082 +case0037_slice083 +case0037_slice084 +case0037_slice085 +case0037_slice086 +case0037_slice087 +case0037_slice088 +case0037_slice089 +case0037_slice090 +case0037_slice091 +case0037_slice092 +case0037_slice093 +case0037_slice094 +case0037_slice095 +case0037_slice096 +case0037_slice097 +case0037_slice098 diff --git a/PuzzleTuning/SSL_structures/TransUNet_main/networks/vit_seg_configs.py b/PuzzleTuning/SSL_structures/TransUNet_main/networks/vit_seg_configs.py new file mode 100644 index 0000000000000000000000000000000000000000..1bc4c784cd439720493cb17f333b683ae0494032 --- /dev/null +++ b/PuzzleTuning/SSL_structures/TransUNet_main/networks/vit_seg_configs.py @@ -0,0 +1,130 @@ +import ml_collections + +def get_b16_config(): + """Returns the ViT-B/16 configuration.""" + config = ml_collections.ConfigDict() + config.patches = ml_collections.ConfigDict({'size': (16, 16)}) + config.hidden_size = 768 + config.transformer = ml_collections.ConfigDict() + config.transformer.mlp_dim = 3072 + config.transformer.num_heads = 12 + config.transformer.num_layers = 12 + config.transformer.attention_dropout_rate = 0.0 + config.transformer.dropout_rate = 0.1 + + config.classifier = 'seg' + config.representation_size = None + config.resnet_pretrained_path = None + config.pretrained_path = '../model/vit_checkpoint/imagenet21k/ViT-B_16.npz' + config.patch_size = 16 + + config.decoder_channels = (256, 128, 64, 16) + config.n_classes = 2 + config.activation = 'softmax' + return config + + +def get_testing(): + """Returns a minimal configuration for testing.""" + config = ml_collections.ConfigDict() + config.patches = ml_collections.ConfigDict({'size': (16, 16)}) + config.hidden_size = 1 + config.transformer = ml_collections.ConfigDict() + config.transformer.mlp_dim = 1 + config.transformer.num_heads = 1 + config.transformer.num_layers = 1 + config.transformer.attention_dropout_rate = 0.0 + 
config.transformer.dropout_rate = 0.1
+    config.classifier = 'token'
+    config.representation_size = None
+    return config
+
+def get_r50_b16_config():
+    """Returns the Resnet50 + ViT-B/16 configuration."""
+    config = get_b16_config()
+    config.patches.grid = (16, 16)
+    config.resnet = ml_collections.ConfigDict()
+    config.resnet.num_layers = (3, 4, 9)
+    config.resnet.width_factor = 1
+
+    config.classifier = 'seg'
+    config.pretrained_path = '../model/vit_checkpoint/imagenet21k/R50+ViT-B_16.npz'
+    config.decoder_channels = (256, 128, 64, 16)
+    config.skip_channels = [512, 256, 64, 16]
+    config.n_classes = 2
+    config.n_skip = 3
+    config.activation = 'softmax'
+
+    return config
+
+
+def get_b32_config():
+    """Returns the ViT-B/32 configuration."""
+    config = get_b16_config()
+    config.patches.size = (32, 32)
+    config.pretrained_path = '../model/vit_checkpoint/imagenet21k/ViT-B_32.npz'
+    return config
+
+
+def get_l16_config():
+    """Returns the ViT-L/16 configuration."""
+    config = ml_collections.ConfigDict()
+    config.patches = ml_collections.ConfigDict({'size': (16, 16)})
+    config.hidden_size = 1024
+    config.transformer = ml_collections.ConfigDict()
+    config.transformer.mlp_dim = 4096
+    config.transformer.num_heads = 16
+    config.transformer.num_layers = 24
+    config.transformer.attention_dropout_rate = 0.0
+    config.transformer.dropout_rate = 0.1
+    config.representation_size = None
+
+    # custom
+    config.classifier = 'seg'
+    config.resnet_pretrained_path = None
+    config.pretrained_path = '../model/vit_checkpoint/imagenet21k/ViT-L_16.npz'
+    config.decoder_channels = (256, 128, 64, 16)
+    config.n_classes = 2
+    config.activation = 'softmax'
+    return config
+
+
+def get_r50_l16_config():
+    """Returns the Resnet50 + ViT-L/16 configuration (customized)."""
+    config = get_l16_config()
+    config.patches.grid = (16, 16)
+    config.resnet = ml_collections.ConfigDict()
+    config.resnet.num_layers = (3, 4, 9)
+    config.resnet.width_factor = 1
+
+    config.classifier = 'seg'
+    config.resnet_pretrained_path = '../model/vit_checkpoint/imagenet21k/R50+ViT-B_16.npz'
+    config.decoder_channels = (256, 128, 64, 16)
+    config.skip_channels = [512, 256, 64, 16]
+    config.n_classes = 2
+    config.activation = 'softmax'
+    return config
+
+
+def get_l32_config():
+    """Returns the ViT-L/32 configuration."""
+    config = get_l16_config()
+    config.patches.size = (32, 32)
+    return config
+
+
+def get_h14_config():
+    """Returns the ViT-H/14 configuration."""
+    config = ml_collections.ConfigDict()
+    config.patches = ml_collections.ConfigDict({'size': (14, 14)})
+    config.hidden_size = 1280
+    config.transformer = ml_collections.ConfigDict()
+    config.transformer.mlp_dim = 5120
+    config.transformer.num_heads = 16
+    config.transformer.num_layers = 32
+    config.transformer.attention_dropout_rate = 0.0
+    config.transformer.dropout_rate = 0.1
+    config.classifier = 'token'
+    config.representation_size = None
+
+    return config
diff --git a/PuzzleTuning/SSL_structures/TransUNet_main/networks/vit_seg_modeling.py b/PuzzleTuning/SSL_structures/TransUNet_main/networks/vit_seg_modeling.py
new file mode 100644
index 0000000000000000000000000000000000000000..8346d9f166fea32cc7007b95b3878687400a7734
--- /dev/null
+++ b/PuzzleTuning/SSL_structures/TransUNet_main/networks/vit_seg_modeling.py
@@ -0,0 +1,453 @@
+# coding=utf-8
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import copy
+import logging
+import math
+
+from os.path import join as pjoin
+
+import torch
+import torch.nn as nn
+import numpy
as np + +from torch.nn import CrossEntropyLoss, Dropout, Softmax, Linear, Conv2d, LayerNorm +from torch.nn.modules.utils import _pair +from scipy import ndimage +from . import vit_seg_configs as configs +from .vit_seg_modeling_resnet_skip import ResNetV2 + + +logger = logging.getLogger(__name__) + + +ATTENTION_Q = "MultiHeadDotProductAttention_1/query" +ATTENTION_K = "MultiHeadDotProductAttention_1/key" +ATTENTION_V = "MultiHeadDotProductAttention_1/value" +ATTENTION_OUT = "MultiHeadDotProductAttention_1/out" +FC_0 = "MlpBlock_3/Dense_0" +FC_1 = "MlpBlock_3/Dense_1" +ATTENTION_NORM = "LayerNorm_0" +MLP_NORM = "LayerNorm_2" + + +def np2th(weights, conv=False): + """Possibly convert HWIO to OIHW.""" + if conv: + weights = weights.transpose([3, 2, 0, 1]) + return torch.from_numpy(weights) + + +def swish(x): + return x * torch.sigmoid(x) + + +ACT2FN = {"gelu": torch.nn.functional.gelu, "relu": torch.nn.functional.relu, "swish": swish} + + +class Attention(nn.Module): + def __init__(self, config, vis): + super(Attention, self).__init__() + self.vis = vis + self.num_attention_heads = config.transformer["num_heads"] + self.attention_head_size = int(config.hidden_size / self.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = Linear(config.hidden_size, self.all_head_size) + self.key = Linear(config.hidden_size, self.all_head_size) + self.value = Linear(config.hidden_size, self.all_head_size) + + self.out = Linear(config.hidden_size, config.hidden_size) + self.attn_dropout = Dropout(config.transformer["attention_dropout_rate"]) + self.proj_dropout = Dropout(config.transformer["attention_dropout_rate"]) + + self.softmax = Softmax(dim=-1) + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward(self, hidden_states): + mixed_query_layer = self.query(hidden_states) + mixed_key_layer = self.key(hidden_states) + mixed_value_layer = self.value(hidden_states) + + query_layer = self.transpose_for_scores(mixed_query_layer) + key_layer = self.transpose_for_scores(mixed_key_layer) + value_layer = self.transpose_for_scores(mixed_value_layer) + + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + attention_probs = self.softmax(attention_scores) + weights = attention_probs if self.vis else None + attention_probs = self.attn_dropout(attention_probs) + + context_layer = torch.matmul(attention_probs, value_layer) + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(*new_context_layer_shape) + attention_output = self.out(context_layer) + attention_output = self.proj_dropout(attention_output) + return attention_output, weights + + +class Mlp(nn.Module): + def __init__(self, config): + super(Mlp, self).__init__() + self.fc1 = Linear(config.hidden_size, config.transformer["mlp_dim"]) + self.fc2 = Linear(config.transformer["mlp_dim"], config.hidden_size) + self.act_fn = ACT2FN["gelu"] + self.dropout = Dropout(config.transformer["dropout_rate"]) + + self._init_weights() + + def _init_weights(self): + nn.init.xavier_uniform_(self.fc1.weight) + nn.init.xavier_uniform_(self.fc2.weight) + nn.init.normal_(self.fc1.bias, std=1e-6) + nn.init.normal_(self.fc2.bias, std=1e-6) + + def forward(self, x): + x = 
self.fc1(x)
+        x = self.act_fn(x)
+        x = self.dropout(x)
+        x = self.fc2(x)
+        x = self.dropout(x)
+        return x
+
+
+class Embeddings(nn.Module):
+    """Construct the embeddings from patch and position embeddings."""
+    def __init__(self, config, img_size, in_channels=3):
+        super(Embeddings, self).__init__()
+        self.hybrid = None
+        self.config = config
+        img_size = _pair(img_size)
+
+        if config.patches.get("grid") is not None:  # ResNet hybrid stem
+            grid_size = config.patches["grid"]
+            patch_size = (img_size[0] // 16 // grid_size[0], img_size[1] // 16 // grid_size[1])
+            patch_size_real = (patch_size[0] * 16, patch_size[1] * 16)
+            n_patches = (img_size[0] // patch_size_real[0]) * (img_size[1] // patch_size_real[1])
+            self.hybrid = True
+        else:
+            patch_size = _pair(config.patches["size"])
+            n_patches = (img_size[0] // patch_size[0]) * (img_size[1] // patch_size[1])
+            self.hybrid = False
+
+        if self.hybrid:
+            self.hybrid_model = ResNetV2(block_units=config.resnet.num_layers, width_factor=config.resnet.width_factor)
+            in_channels = self.hybrid_model.width * 16
+        self.patch_embeddings = Conv2d(in_channels=in_channels,
+                                       out_channels=config.hidden_size,
+                                       kernel_size=patch_size,
+                                       stride=patch_size)
+        self.position_embeddings = nn.Parameter(torch.zeros(1, n_patches, config.hidden_size))
+
+        self.dropout = Dropout(config.transformer["dropout_rate"])
+
+    def forward(self, x):
+        if self.hybrid:
+            x, features = self.hybrid_model(x)
+        else:
+            features = None
+        x = self.patch_embeddings(x)  # (B, hidden, sqrt(n_patches), sqrt(n_patches))
+        x = x.flatten(2)
+        x = x.transpose(-1, -2)  # (B, n_patches, hidden)
+
+        embeddings = x + self.position_embeddings
+        embeddings = self.dropout(embeddings)
+        return embeddings, features
+
+
+class Block(nn.Module):
+    def __init__(self, config, vis):
+        super(Block, self).__init__()
+        self.hidden_size = config.hidden_size
+        self.attention_norm = LayerNorm(config.hidden_size, eps=1e-6)
+        self.ffn_norm = LayerNorm(config.hidden_size, eps=1e-6)
+        self.ffn = Mlp(config)
+        self.attn = Attention(config, vis)
+
+    def forward(self, x):
+        h = x
+        x = self.attention_norm(x)
+        x, weights = self.attn(x)
+        x = x + h
+
+        h = x
+        x = self.ffn_norm(x)
+        x = self.ffn(x)
+        x = x + h
+        return x, weights
+
+    def load_from(self, weights, n_block):
+        ROOT = f"Transformer/encoderblock_{n_block}"
+        with torch.no_grad():
+            query_weight = np2th(weights[pjoin(ROOT, ATTENTION_Q, "kernel")]).view(self.hidden_size, self.hidden_size).t()
+            key_weight = np2th(weights[pjoin(ROOT, ATTENTION_K, "kernel")]).view(self.hidden_size, self.hidden_size).t()
+            value_weight = np2th(weights[pjoin(ROOT, ATTENTION_V, "kernel")]).view(self.hidden_size, self.hidden_size).t()
+            out_weight = np2th(weights[pjoin(ROOT, ATTENTION_OUT, "kernel")]).view(self.hidden_size, self.hidden_size).t()
+
+            query_bias = np2th(weights[pjoin(ROOT, ATTENTION_Q, "bias")]).view(-1)
+            key_bias = np2th(weights[pjoin(ROOT, ATTENTION_K, "bias")]).view(-1)
+            value_bias = np2th(weights[pjoin(ROOT, ATTENTION_V, "bias")]).view(-1)
+            out_bias = np2th(weights[pjoin(ROOT, ATTENTION_OUT, "bias")]).view(-1)
+
+            self.attn.query.weight.copy_(query_weight)
+            self.attn.key.weight.copy_(key_weight)
+            self.attn.value.weight.copy_(value_weight)
+            self.attn.out.weight.copy_(out_weight)
+            self.attn.query.bias.copy_(query_bias)
+            self.attn.key.bias.copy_(key_bias)
+            self.attn.value.bias.copy_(value_bias)
+            self.attn.out.bias.copy_(out_bias)
+
+            mlp_weight_0 = np2th(weights[pjoin(ROOT, FC_0, "kernel")]).t()
+            mlp_weight_1 = np2th(weights[pjoin(ROOT, FC_1, "kernel")]).t()
+            mlp_bias_0
= np2th(weights[pjoin(ROOT, FC_0, "bias")]).t() + mlp_bias_1 = np2th(weights[pjoin(ROOT, FC_1, "bias")]).t() + + self.ffn.fc1.weight.copy_(mlp_weight_0) + self.ffn.fc2.weight.copy_(mlp_weight_1) + self.ffn.fc1.bias.copy_(mlp_bias_0) + self.ffn.fc2.bias.copy_(mlp_bias_1) + + self.attention_norm.weight.copy_(np2th(weights[pjoin(ROOT, ATTENTION_NORM, "scale")])) + self.attention_norm.bias.copy_(np2th(weights[pjoin(ROOT, ATTENTION_NORM, "bias")])) + self.ffn_norm.weight.copy_(np2th(weights[pjoin(ROOT, MLP_NORM, "scale")])) + self.ffn_norm.bias.copy_(np2th(weights[pjoin(ROOT, MLP_NORM, "bias")])) + + +class Encoder(nn.Module): + def __init__(self, config, vis): + super(Encoder, self).__init__() + self.vis = vis + self.layer = nn.ModuleList() + self.encoder_norm = LayerNorm(config.hidden_size, eps=1e-6) + for _ in range(config.transformer["num_layers"]): + layer = Block(config, vis) + self.layer.append(copy.deepcopy(layer)) + + def forward(self, hidden_states): + attn_weights = [] + for layer_block in self.layer: + hidden_states, weights = layer_block(hidden_states) + if self.vis: + attn_weights.append(weights) + encoded = self.encoder_norm(hidden_states) + return encoded, attn_weights + + +class Transformer(nn.Module): + def __init__(self, config, img_size, vis): + super(Transformer, self).__init__() + self.embeddings = Embeddings(config, img_size=img_size) + self.encoder = Encoder(config, vis) + + def forward(self, input_ids): + embedding_output, features = self.embeddings(input_ids) + encoded, attn_weights = self.encoder(embedding_output) # (B, n_patch, hidden) + return encoded, attn_weights, features + + +class Conv2dReLU(nn.Sequential): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + padding=0, + stride=1, + use_batchnorm=True, + ): + conv = nn.Conv2d( + in_channels, + out_channels, + kernel_size, + stride=stride, + padding=padding, + bias=not (use_batchnorm), + ) + relu = nn.ReLU(inplace=True) + + bn = nn.BatchNorm2d(out_channels) + + super(Conv2dReLU, self).__init__(conv, bn, relu) + + +class DecoderBlock(nn.Module): + def __init__( + self, + in_channels, + out_channels, + skip_channels=0, + use_batchnorm=True, + ): + super().__init__() + self.conv1 = Conv2dReLU( + in_channels + skip_channels, + out_channels, + kernel_size=3, + padding=1, + use_batchnorm=use_batchnorm, + ) + self.conv2 = Conv2dReLU( + out_channels, + out_channels, + kernel_size=3, + padding=1, + use_batchnorm=use_batchnorm, + ) + self.up = nn.UpsamplingBilinear2d(scale_factor=2) + + def forward(self, x, skip=None): + x = self.up(x) + if skip is not None: + x = torch.cat([x, skip], dim=1) + x = self.conv1(x) + x = self.conv2(x) + return x + + +class SegmentationHead(nn.Sequential): + + def __init__(self, in_channels, out_channels, kernel_size=3, upsampling=1): + conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=kernel_size // 2) + upsampling = nn.UpsamplingBilinear2d(scale_factor=upsampling) if upsampling > 1 else nn.Identity() + super().__init__(conv2d, upsampling) + + +class DecoderCup(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + head_channels = 512 + self.conv_more = Conv2dReLU( + config.hidden_size, + head_channels, + kernel_size=3, + padding=1, + use_batchnorm=True, + ) + decoder_channels = config.decoder_channels + in_channels = [head_channels] + list(decoder_channels[:-1]) + out_channels = decoder_channels + + if self.config.n_skip != 0: + skip_channels = self.config.skip_channels + for i in range(4-self.config.n_skip): # 
re-select the skip channels according to n_skip + skip_channels[3-i]=0 + + else: + skip_channels=[0,0,0,0] + + blocks = [ + DecoderBlock(in_ch, out_ch, sk_ch) for in_ch, out_ch, sk_ch in zip(in_channels, out_channels, skip_channels) + ] + self.blocks = nn.ModuleList(blocks) + + def forward(self, hidden_states, features=None): + B, n_patch, hidden = hidden_states.size() # reshape from (B, n_patch, hidden) to (B, h, w, hidden) + h, w = int(np.sqrt(n_patch)), int(np.sqrt(n_patch)) + x = hidden_states.permute(0, 2, 1) + x = x.contiguous().view(B, hidden, h, w) + x = self.conv_more(x) + for i, decoder_block in enumerate(self.blocks): + if features is not None: + skip = features[i] if (i < self.config.n_skip) else None + else: + skip = None + x = decoder_block(x, skip=skip) + return x + + +class VisionTransformer(nn.Module): + def __init__(self, config, img_size=224, num_classes=21843, zero_head=False, vis=False): + super(VisionTransformer, self).__init__() + self.num_classes = num_classes + self.zero_head = zero_head + self.classifier = config.classifier + self.transformer = Transformer(config, img_size, vis) + self.decoder = DecoderCup(config) + self.segmentation_head = SegmentationHead( + in_channels=config['decoder_channels'][-1], + out_channels=config['n_classes'], + kernel_size=3, + ) + self.config = config + + def forward(self, x): + if x.size()[1] == 1: + x = x.repeat(1,3,1,1) + x, attn_weights, features = self.transformer(x) # (B, n_patch, hidden) + x = self.decoder(x, features) + logits = self.segmentation_head(x) + return logits + + def load_from(self, weights): + with torch.no_grad(): + + res_weight = weights + self.transformer.embeddings.patch_embeddings.weight.copy_(np2th(weights["embedding/kernel"], conv=True)) + self.transformer.embeddings.patch_embeddings.bias.copy_(np2th(weights["embedding/bias"])) + + self.transformer.encoder.encoder_norm.weight.copy_(np2th(weights["Transformer/encoder_norm/scale"])) + self.transformer.encoder.encoder_norm.bias.copy_(np2th(weights["Transformer/encoder_norm/bias"])) + + posemb = np2th(weights["Transformer/posembed_input/pos_embedding"]) + + posemb_new = self.transformer.embeddings.position_embeddings + if posemb.size() == posemb_new.size(): + self.transformer.embeddings.position_embeddings.copy_(posemb) + elif posemb.size()[1]-1 == posemb_new.size()[1]: + posemb = posemb[:, 1:] + self.transformer.embeddings.position_embeddings.copy_(posemb) + else: + logger.info("load_pretrained: resized variant: %s to %s" % (posemb.size(), posemb_new.size())) + ntok_new = posemb_new.size(1) + if self.classifier == "seg": + _, posemb_grid = posemb[:, :1], posemb[0, 1:] + gs_old = int(np.sqrt(len(posemb_grid))) + gs_new = int(np.sqrt(ntok_new)) + print('load_pretrained: grid-size from %s to %s' % (gs_old, gs_new)) + posemb_grid = posemb_grid.reshape(gs_old, gs_old, -1) + zoom = (gs_new / gs_old, gs_new / gs_old, 1) + posemb_grid = ndimage.zoom(posemb_grid, zoom, order=1) # th2np + posemb_grid = posemb_grid.reshape(1, gs_new * gs_new, -1) + posemb = posemb_grid + self.transformer.embeddings.position_embeddings.copy_(np2th(posemb)) + + # Encoder whole + for bname, block in self.transformer.encoder.named_children(): + for uname, unit in block.named_children(): + unit.load_from(weights, n_block=uname) + + if self.transformer.embeddings.hybrid: + self.transformer.embeddings.hybrid_model.root.conv.weight.copy_(np2th(res_weight["conv_root/kernel"], conv=True)) + gn_weight = np2th(res_weight["gn_root/scale"]).view(-1) + gn_bias = 
np2th(res_weight["gn_root/bias"]).view(-1) + self.transformer.embeddings.hybrid_model.root.gn.weight.copy_(gn_weight) + self.transformer.embeddings.hybrid_model.root.gn.bias.copy_(gn_bias) + + for bname, block in self.transformer.embeddings.hybrid_model.body.named_children(): + for uname, unit in block.named_children(): + unit.load_from(res_weight, n_block=bname, n_unit=uname) + +CONFIGS = { + 'ViT-B_16': configs.get_b16_config(), + 'ViT-B_32': configs.get_b32_config(), + 'ViT-L_16': configs.get_l16_config(), + 'ViT-L_32': configs.get_l32_config(), + 'ViT-H_14': configs.get_h14_config(), + 'R50-ViT-B_16': configs.get_r50_b16_config(), + 'R50-ViT-L_16': configs.get_r50_l16_config(), + 'testing': configs.get_testing(), +} + + diff --git a/PuzzleTuning/SSL_structures/TransUNet_main/networks/vit_seg_modeling_resnet_skip.py b/PuzzleTuning/SSL_structures/TransUNet_main/networks/vit_seg_modeling_resnet_skip.py new file mode 100644 index 0000000000000000000000000000000000000000..9753d52fbe8275e77cc18870c1e9f9564d8cc008 --- /dev/null +++ b/PuzzleTuning/SSL_structures/TransUNet_main/networks/vit_seg_modeling_resnet_skip.py @@ -0,0 +1,160 @@ +import math + +from os.path import join as pjoin +from collections import OrderedDict + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def np2th(weights, conv=False): + """Possibly convert HWIO to OIHW.""" + if conv: + weights = weights.transpose([3, 2, 0, 1]) + return torch.from_numpy(weights) + + +class StdConv2d(nn.Conv2d): + + def forward(self, x): + w = self.weight + v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False) + w = (w - m) / torch.sqrt(v + 1e-5) + return F.conv2d(x, w, self.bias, self.stride, self.padding, + self.dilation, self.groups) + + +def conv3x3(cin, cout, stride=1, groups=1, bias=False): + return StdConv2d(cin, cout, kernel_size=3, stride=stride, + padding=1, bias=bias, groups=groups) + + +def conv1x1(cin, cout, stride=1, bias=False): + return StdConv2d(cin, cout, kernel_size=1, stride=stride, + padding=0, bias=bias) + + +class PreActBottleneck(nn.Module): + """Pre-activation (v2) bottleneck block. + """ + + def __init__(self, cin, cout=None, cmid=None, stride=1): + super().__init__() + cout = cout or cin + cmid = cmid or cout//4 + + self.gn1 = nn.GroupNorm(32, cmid, eps=1e-6) + self.conv1 = conv1x1(cin, cmid, bias=False) + self.gn2 = nn.GroupNorm(32, cmid, eps=1e-6) + self.conv2 = conv3x3(cmid, cmid, stride, bias=False) # Original code has it on conv1!! + self.gn3 = nn.GroupNorm(32, cout, eps=1e-6) + self.conv3 = conv1x1(cmid, cout, bias=False) + self.relu = nn.ReLU(inplace=True) + + if (stride != 1 or cin != cout): + # Projection also with pre-activation according to paper. 
+ self.downsample = conv1x1(cin, cout, stride, bias=False) + self.gn_proj = nn.GroupNorm(cout, cout) + + def forward(self, x): + + # Residual branch + residual = x + if hasattr(self, 'downsample'): + residual = self.downsample(x) + residual = self.gn_proj(residual) + + # Unit's branch + y = self.relu(self.gn1(self.conv1(x))) + y = self.relu(self.gn2(self.conv2(y))) + y = self.gn3(self.conv3(y)) + + y = self.relu(residual + y) + return y + + def load_from(self, weights, n_block, n_unit): + conv1_weight = np2th(weights[pjoin(n_block, n_unit, "conv1/kernel")], conv=True) + conv2_weight = np2th(weights[pjoin(n_block, n_unit, "conv2/kernel")], conv=True) + conv3_weight = np2th(weights[pjoin(n_block, n_unit, "conv3/kernel")], conv=True) + + gn1_weight = np2th(weights[pjoin(n_block, n_unit, "gn1/scale")]) + gn1_bias = np2th(weights[pjoin(n_block, n_unit, "gn1/bias")]) + + gn2_weight = np2th(weights[pjoin(n_block, n_unit, "gn2/scale")]) + gn2_bias = np2th(weights[pjoin(n_block, n_unit, "gn2/bias")]) + + gn3_weight = np2th(weights[pjoin(n_block, n_unit, "gn3/scale")]) + gn3_bias = np2th(weights[pjoin(n_block, n_unit, "gn3/bias")]) + + self.conv1.weight.copy_(conv1_weight) + self.conv2.weight.copy_(conv2_weight) + self.conv3.weight.copy_(conv3_weight) + + self.gn1.weight.copy_(gn1_weight.view(-1)) + self.gn1.bias.copy_(gn1_bias.view(-1)) + + self.gn2.weight.copy_(gn2_weight.view(-1)) + self.gn2.bias.copy_(gn2_bias.view(-1)) + + self.gn3.weight.copy_(gn3_weight.view(-1)) + self.gn3.bias.copy_(gn3_bias.view(-1)) + + if hasattr(self, 'downsample'): + proj_conv_weight = np2th(weights[pjoin(n_block, n_unit, "conv_proj/kernel")], conv=True) + proj_gn_weight = np2th(weights[pjoin(n_block, n_unit, "gn_proj/scale")]) + proj_gn_bias = np2th(weights[pjoin(n_block, n_unit, "gn_proj/bias")]) + + self.downsample.weight.copy_(proj_conv_weight) + self.gn_proj.weight.copy_(proj_gn_weight.view(-1)) + self.gn_proj.bias.copy_(proj_gn_bias.view(-1)) + +class ResNetV2(nn.Module): + """Implementation of Pre-activation (v2) ResNet model.""" + + def __init__(self, block_units, width_factor): + super().__init__() + width = int(64 * width_factor) + self.width = width + + self.root = nn.Sequential(OrderedDict([ + ('conv', StdConv2d(3, width, kernel_size=7, stride=2, bias=False, padding=3)), + ('gn', nn.GroupNorm(32, width, eps=1e-6)), + ('relu', nn.ReLU(inplace=True)), + # ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0)) + ])) + + self.body = nn.Sequential(OrderedDict([ + ('block1', nn.Sequential(OrderedDict( + [('unit1', PreActBottleneck(cin=width, cout=width*4, cmid=width))] + + [(f'unit{i:d}', PreActBottleneck(cin=width*4, cout=width*4, cmid=width)) for i in range(2, block_units[0] + 1)], + ))), + ('block2', nn.Sequential(OrderedDict( + [('unit1', PreActBottleneck(cin=width*4, cout=width*8, cmid=width*2, stride=2))] + + [(f'unit{i:d}', PreActBottleneck(cin=width*8, cout=width*8, cmid=width*2)) for i in range(2, block_units[1] + 1)], + ))), + ('block3', nn.Sequential(OrderedDict( + [('unit1', PreActBottleneck(cin=width*8, cout=width*16, cmid=width*4, stride=2))] + + [(f'unit{i:d}', PreActBottleneck(cin=width*16, cout=width*16, cmid=width*4)) for i in range(2, block_units[2] + 1)], + ))), + ])) + + def forward(self, x): + features = [] + b, c, in_size, _ = x.size() + x = self.root(x) + features.append(x) + x = nn.MaxPool2d(kernel_size=3, stride=2, padding=0)(x) + for i in range(len(self.body)-1): + x = self.body[i](x) + right_size = int(in_size / 4 / (i+1)) + if x.size()[2] != right_size: + pad = right_size - 
x.size()[2] + assert pad < 3 and pad > 0, "x {} should {}".format(x.size(), right_size) + feat = torch.zeros((b, x.size()[1], right_size, right_size), device=x.device) + feat[:, :, 0:x.size()[2], 0:x.size()[3]] = x[:] + else: + feat = x + features.append(feat) + x = self.body[-1](x) + return x, features[::-1] diff --git a/PuzzleTuning/SSL_structures/TransUNet_main/requirements.txt b/PuzzleTuning/SSL_structures/TransUNet_main/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..4abfe422e0bd10ed594596292121fb6eac4d4581 --- /dev/null +++ b/PuzzleTuning/SSL_structures/TransUNet_main/requirements.txt @@ -0,0 +1,11 @@ +torch==1.4.0 +torchvision==0.5.0 +numpy +tqdm +tensorboard +tensorboardX +ml-collections +medpy +SimpleITK +scipy +h5py diff --git a/PuzzleTuning/SSL_structures/TransUNet_main/test.py b/PuzzleTuning/SSL_structures/TransUNet_main/test.py new file mode 100644 index 0000000000000000000000000000000000000000..35a48027e952822f29b7f439f85007ee81d9b92e --- /dev/null +++ b/PuzzleTuning/SSL_structures/TransUNet_main/test.py @@ -0,0 +1,140 @@ +import argparse +import logging +import os +import random +import sys +import numpy as np +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +from torch.utils.data import DataLoader +from tqdm import tqdm +from datasets.dataset_synapse import Synapse_dataset +from utils import test_single_volume +from networks.vit_seg_modeling import VisionTransformer as ViT_seg +from networks.vit_seg_modeling import CONFIGS as CONFIGS_ViT_seg + +parser = argparse.ArgumentParser() +parser.add_argument('--volume_path', type=str, + default='../data/Synapse/test_vol_h5', help='root dir for validation volume data') # for acdc volume_path=root_dir +parser.add_argument('--dataset', type=str, + default='Synapse', help='experiment_name') +parser.add_argument('--num_classes', type=int, + default=4, help='output channel of network') +parser.add_argument('--list_dir', type=str, + default='./lists/lists_Synapse', help='list dir') + +parser.add_argument('--max_iterations', type=int, default=20000, help='maximum iteration number to train') +parser.add_argument('--max_epochs', type=int, default=30, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, default=24, + help='batch_size per gpu') +parser.add_argument('--img_size', type=int, default=224, help='input patch size of network input') +parser.add_argument('--is_savenii', action="store_true", help='whether to save results during inference') + +parser.add_argument('--n_skip', type=int, default=3, help='number of skip-connections to use') +parser.add_argument('--vit_name', type=str, default='ViT-B_16', help='select one vit model') + +parser.add_argument('--test_save_dir', type=str, default='../predictions', help='directory for saving predictions as nii') +parser.add_argument('--deterministic', type=int, default=1, help='whether to use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.01, help='segmentation network learning rate') +parser.add_argument('--seed', type=int, default=1234, help='random seed') +parser.add_argument('--vit_patches_size', type=int, default=16, help='vit_patches_size, default is 16') +args = parser.parse_args() + + +def inference(args, model, test_save_path=None): + db_test = args.Dataset(base_dir=args.volume_path, split="test_vol", list_dir=args.list_dir) + testloader = DataLoader(db_test, batch_size=1, shuffle=False, num_workers=1) + logging.info("{} test iterations per epoch".format(len(testloader)))
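+ # NOTE (editor): metric_list below accumulates one (dice, hd95) pair per foreground
+ # class for every test case, and is normalized by the number of cases at the end.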
+ model.eval() + metric_list = 0.0 + for i_batch, sampled_batch in tqdm(enumerate(testloader)): + h, w = sampled_batch["image"].size()[2:] + image, label, case_name = sampled_batch["image"], sampled_batch["label"], sampled_batch['case_name'][0] + metric_i = test_single_volume(image, label, model, classes=args.num_classes, patch_size=[args.img_size, args.img_size], + test_save_path=test_save_path, case=case_name, z_spacing=args.z_spacing) + metric_list += np.array(metric_i) + logging.info('idx %d case %s mean_dice %f mean_hd95 %f' % (i_batch, case_name, np.mean(metric_i, axis=0)[0], np.mean(metric_i, axis=0)[1])) + metric_list = metric_list / len(db_test) + for i in range(1, args.num_classes): + logging.info('Mean class %d mean_dice %f mean_hd95 %f' % (i, metric_list[i-1][0], metric_list[i-1][1])) + performance = np.mean(metric_list, axis=0)[0] + mean_hd95 = np.mean(metric_list, axis=0)[1] + logging.info('Testing performance in best val model: mean_dice : %f mean_hd95 : %f' % (performance, mean_hd95)) + return "Testing Finished!" + + +if __name__ == "__main__": + + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + + dataset_config = { + 'Synapse': { + 'Dataset': Synapse_dataset, + 'volume_path': '../data/Synapse/test_vol_h5', + 'list_dir': './lists/lists_Synapse', + 'num_classes': 9, + 'z_spacing': 1, + }, + } + dataset_name = args.dataset + args.num_classes = dataset_config[dataset_name]['num_classes'] + args.volume_path = dataset_config[dataset_name]['volume_path'] + args.Dataset = dataset_config[dataset_name]['Dataset'] + args.list_dir = dataset_config[dataset_name]['list_dir'] + args.z_spacing = dataset_config[dataset_name]['z_spacing'] + args.is_pretrain = True + + # use the same snapshot name as defined in the train script!
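+ # NOTE (editor): a worked example of the path built below, assuming the defaults
+ # above (Synapse, img_size=224, ViT-B_16, n_skip=3, bs=24, lr=0.01, seed=1234):
+ # ../model/TU_Synapse224/TU_pretrain_ViT-B_16_skip3_bs24_224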
+ args.exp = 'TU_' + dataset_name + str(args.img_size) + snapshot_path = "../model/{}/{}".format(args.exp, 'TU') + snapshot_path = snapshot_path + '_pretrain' if args.is_pretrain else snapshot_path + snapshot_path += '_' + args.vit_name + snapshot_path = snapshot_path + '_skip' + str(args.n_skip) + snapshot_path = snapshot_path + '_vitpatch' + str(args.vit_patches_size) if args.vit_patches_size!=16 else snapshot_path + snapshot_path = snapshot_path + '_epo' + str(args.max_epochs) if args.max_epochs != 30 else snapshot_path + if dataset_name == 'ACDC': # using max_epoch instead of iteration to control training duration + snapshot_path = snapshot_path + '_' + str(args.max_iterations)[0:2] + 'k' if args.max_iterations != 30000 else snapshot_path + snapshot_path = snapshot_path+'_bs'+str(args.batch_size) + snapshot_path = snapshot_path + '_lr' + str(args.base_lr) if args.base_lr != 0.01 else snapshot_path + snapshot_path = snapshot_path + '_'+str(args.img_size) + snapshot_path = snapshot_path + '_s'+str(args.seed) if args.seed!=1234 else snapshot_path + + config_vit = CONFIGS_ViT_seg[args.vit_name] + config_vit.n_classes = args.num_classes + config_vit.n_skip = args.n_skip + config_vit.patches.size = (args.vit_patches_size, args.vit_patches_size) + if args.vit_name.find('R50') !=-1: + config_vit.patches.grid = (int(args.img_size/args.vit_patches_size), int(args.img_size/args.vit_patches_size)) + net = ViT_seg(config_vit, img_size=args.img_size, num_classes=config_vit.n_classes).cuda() + + snapshot = os.path.join(snapshot_path, 'best_model.pth') + if not os.path.exists(snapshot): snapshot = snapshot.replace('best_model', 'epoch_'+str(args.max_epochs-1)) + net.load_state_dict(torch.load(snapshot)) + snapshot_name = snapshot_path.split('/')[-1] + + log_folder = './test_log/test_log_' + args.exp + os.makedirs(log_folder, exist_ok=True) + logging.basicConfig(filename=log_folder + '/'+snapshot_name+".txt", level=logging.INFO, format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + logging.info(str(args)) + logging.info(snapshot_name) + + if args.is_savenii: + args.test_save_dir = '../predictions' + test_save_path = os.path.join(args.test_save_dir, args.exp, snapshot_name) + os.makedirs(test_save_path, exist_ok=True) + else: + test_save_path = None + inference(args, net, test_save_path) + + diff --git a/PuzzleTuning/SSL_structures/TransUNet_main/train.py b/PuzzleTuning/SSL_structures/TransUNet_main/train.py new file mode 100644 index 0000000000000000000000000000000000000000..438dc76b9d2a00f2abe4aacca9d7279dcad4685d --- /dev/null +++ b/PuzzleTuning/SSL_structures/TransUNet_main/train.py @@ -0,0 +1,93 @@ +import argparse +import logging +import os +import random +import numpy as np +import torch +import torch.backends.cudnn as cudnn +from networks.vit_seg_modeling import VisionTransformer as ViT_seg +from networks.vit_seg_modeling import CONFIGS as CONFIGS_ViT_seg +from trainer import trainer_synapse + +parser = argparse.ArgumentParser() +parser.add_argument('--root_path', type=str, + default='../data/Synapse/train_npz', help='root dir for data') +parser.add_argument('--dataset', type=str, + default='Synapse', help='experiment_name') +parser.add_argument('--list_dir', type=str, + default='./lists/lists_Synapse', help='list dir') +parser.add_argument('--num_classes', type=int, + default=9, help='output channel of network')
+parser.add_argument('--max_iterations', type=int, + default=30000, help='maximum iteration number to train') +parser.add_argument('--max_epochs', type=int, + default=150, help='maximum epoch number to train') +parser.add_argument('--batch_size', type=int, + default=24, help='batch_size per gpu') +parser.add_argument('--n_gpu', type=int, default=1, help='total gpu') +parser.add_argument('--deterministic', type=int, default=1, + help='whether to use deterministic training') +parser.add_argument('--base_lr', type=float, default=0.01, + help='segmentation network learning rate') +parser.add_argument('--img_size', type=int, + default=224, help='input patch size of network input') +parser.add_argument('--seed', type=int, + default=1234, help='random seed') +parser.add_argument('--n_skip', type=int, + default=3, help='number of skip-connections to use') +parser.add_argument('--vit_name', type=str, + default='R50-ViT-B_16', help='select one vit model') +parser.add_argument('--vit_patches_size', type=int, + default=16, help='vit_patches_size, default is 16') +args = parser.parse_args() + + +if __name__ == "__main__": + if not args.deterministic: + cudnn.benchmark = True + cudnn.deterministic = False + else: + cudnn.benchmark = False + cudnn.deterministic = True + + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + dataset_name = args.dataset + dataset_config = { + 'Synapse': { + 'root_path': '../data/Synapse/train_npz', + 'list_dir': './lists/lists_Synapse', + 'num_classes': 9, + }, + } + args.num_classes = dataset_config[dataset_name]['num_classes'] + args.root_path = dataset_config[dataset_name]['root_path'] + args.list_dir = dataset_config[dataset_name]['list_dir'] + args.is_pretrain = True + args.exp = 'TU_' + dataset_name + str(args.img_size) + snapshot_path = "../model/{}/{}".format(args.exp, 'TU') + snapshot_path = snapshot_path + '_pretrain' if args.is_pretrain else snapshot_path + snapshot_path += '_' + args.vit_name + snapshot_path = snapshot_path + '_skip' + str(args.n_skip) + snapshot_path = snapshot_path + '_vitpatch' + str(args.vit_patches_size) if args.vit_patches_size!=16 else snapshot_path + snapshot_path = snapshot_path+'_'+str(args.max_iterations)[0:2]+'k' if args.max_iterations != 30000 else snapshot_path + snapshot_path = snapshot_path + '_epo' +str(args.max_epochs) if args.max_epochs != 30 else snapshot_path + snapshot_path = snapshot_path+'_bs'+str(args.batch_size) + snapshot_path = snapshot_path + '_lr' + str(args.base_lr) if args.base_lr != 0.01 else snapshot_path + snapshot_path = snapshot_path + '_'+str(args.img_size) + snapshot_path = snapshot_path + '_s'+str(args.seed) if args.seed!=1234 else snapshot_path + + if not os.path.exists(snapshot_path): + os.makedirs(snapshot_path) + config_vit = CONFIGS_ViT_seg[args.vit_name] + config_vit.n_classes = args.num_classes + config_vit.n_skip = args.n_skip + if args.vit_name.find('R50') != -1: + config_vit.patches.grid = (int(args.img_size / args.vit_patches_size), int(args.img_size / args.vit_patches_size)) + net = ViT_seg(config_vit, img_size=args.img_size, num_classes=config_vit.n_classes).cuda() + net.load_from(weights=np.load(config_vit.pretrained_path)) + + trainer = {'Synapse': trainer_synapse,} + trainer[dataset_name](args, net, snapshot_path) \ No newline at end of file diff --git a/PuzzleTuning/SSL_structures/TransUNet_main/trainer.py b/PuzzleTuning/SSL_structures/TransUNet_main/trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..2445e10ce2ef85789041532ec47d6f9674016070 --- /dev/null +++ 
b/PuzzleTuning/SSL_structures/TransUNet_main/trainer.py @@ -0,0 +1,96 @@ +import argparse +import logging +import os +import random +import sys +import time +import numpy as np +import torch +import torch.nn as nn +import torch.optim as optim +from tensorboardX import SummaryWriter +from torch.nn.modules.loss import CrossEntropyLoss +from torch.utils.data import DataLoader +from tqdm import tqdm +from utils import DiceLoss +from torchvision import transforms + +def trainer_synapse(args, model, snapshot_path): + from datasets.dataset_synapse import Synapse_dataset, RandomGenerator + logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO, + format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S') + logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) + logging.info(str(args)) + base_lr = args.base_lr + num_classes = args.num_classes + batch_size = args.batch_size * args.n_gpu + # max_iterations = args.max_iterations + db_train = Synapse_dataset(base_dir=args.root_path, list_dir=args.list_dir, split="train", + transform=transforms.Compose( + [RandomGenerator(output_size=[args.img_size, args.img_size])])) + print("The length of train set is: {}".format(len(db_train))) + + def worker_init_fn(worker_id): + random.seed(args.seed + worker_id) + + trainloader = DataLoader(db_train, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True, + worker_init_fn=worker_init_fn) + if args.n_gpu > 1: + model = nn.DataParallel(model) + model.train() + ce_loss = CrossEntropyLoss() + dice_loss = DiceLoss(num_classes) + optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=0.0001) + writer = SummaryWriter(snapshot_path + '/log') + iter_num = 0 + max_epoch = args.max_epochs + max_iterations = args.max_epochs * len(trainloader) # max_epoch = max_iterations // len(trainloader) + 1 + logging.info("{} iterations per epoch. {} max iterations ".format(len(trainloader), max_iterations)) + best_performance = 0.0 + iterator = tqdm(range(max_epoch), ncols=70) + for epoch_num in iterator: + for i_batch, sampled_batch in enumerate(trainloader): + image_batch, label_batch = sampled_batch['image'], sampled_batch['label'] + image_batch, label_batch = image_batch.cuda(), label_batch.cuda() + outputs = model(image_batch) + loss_ce = ce_loss(outputs, label_batch[:].long()) + loss_dice = dice_loss(outputs, label_batch, softmax=True) + loss = 0.5 * loss_ce + 0.5 * loss_dice + optimizer.zero_grad() + loss.backward() + optimizer.step() + lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9 + for param_group in optimizer.param_groups: + param_group['lr'] = lr_ + + iter_num = iter_num + 1 + writer.add_scalar('info/lr', lr_, iter_num) + writer.add_scalar('info/total_loss', loss, iter_num) + writer.add_scalar('info/loss_ce', loss_ce, iter_num) + + logging.info('iteration %d : loss : %f, loss_ce: %f' % (iter_num, loss.item(), loss_ce.item())) + + if iter_num % 20 == 0: + image = image_batch[1, 0:1, :, :] + image = (image - image.min()) / (image.max() - image.min()) + writer.add_image('train/Image', image, iter_num) + outputs = torch.argmax(torch.softmax(outputs, dim=1), dim=1, keepdim=True) + writer.add_image('train/Prediction', outputs[1, ...] 
* 50, iter_num) + labs = label_batch[1, ...].unsqueeze(0) * 50 + writer.add_image('train/GroundTruth', labs, iter_num) + + save_interval = 50 # int(max_epoch/6) + if epoch_num > int(max_epoch / 2) and (epoch_num + 1) % save_interval == 0: + save_mode_path = os.path.join(snapshot_path, 'epoch_' + str(epoch_num) + '.pth') + torch.save(model.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + + if epoch_num >= max_epoch - 1: + save_mode_path = os.path.join(snapshot_path, 'epoch_' + str(epoch_num) + '.pth') + torch.save(model.state_dict(), save_mode_path) + logging.info("save model to {}".format(save_mode_path)) + iterator.close() + break + + writer.close() + return "Training Finished!" \ No newline at end of file diff --git a/PuzzleTuning/SSL_structures/TransUNet_main/utils.py b/PuzzleTuning/SSL_structures/TransUNet_main/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0e3a1bf9ad7058f506faff0a8ae4f6056514575c --- /dev/null +++ b/PuzzleTuning/SSL_structures/TransUNet_main/utils.py @@ -0,0 +1,102 @@ +import numpy as np +import torch +from medpy import metric +from scipy.ndimage import zoom +import torch.nn as nn +import SimpleITK as sitk + + +class DiceLoss(nn.Module): + def __init__(self, n_classes): + super(DiceLoss, self).__init__() + self.n_classes = n_classes + + def _one_hot_encoder(self, input_tensor): + tensor_list = [] + for i in range(self.n_classes): + temp_prob = input_tensor == i # * torch.ones_like(input_tensor) + tensor_list.append(temp_prob.unsqueeze(1)) + output_tensor = torch.cat(tensor_list, dim=1) + return output_tensor.float() + + def _dice_loss(self, score, target): + target = target.float() + smooth = 1e-5 + intersect = torch.sum(score * target) + y_sum = torch.sum(target * target) + z_sum = torch.sum(score * score) + loss = (2 * intersect + smooth) / (z_sum + y_sum + smooth) + loss = 1 - loss + return loss + + def forward(self, inputs, target, weight=None, softmax=False): + if softmax: + inputs = torch.softmax(inputs, dim=1) + target = self._one_hot_encoder(target) + if weight is None: + weight = [1] * self.n_classes + assert inputs.size() == target.size(), 'predict {} & target {} shape do not match'.format(inputs.size(), target.size()) + class_wise_dice = [] + loss = 0.0 + for i in range(0, self.n_classes): + dice = self._dice_loss(inputs[:, i], target[:, i]) + class_wise_dice.append(1.0 - dice.item()) + loss += dice * weight[i] + return loss / self.n_classes + + +def calculate_metric_percase(pred, gt): + pred[pred > 0] = 1 + gt[gt > 0] = 1 + if pred.sum() > 0 and gt.sum()>0: + dice = metric.binary.dc(pred, gt) + hd95 = metric.binary.hd95(pred, gt) + return dice, hd95 + elif pred.sum() > 0 and gt.sum()==0: + return 1, 0 + else: + return 0, 0 + + +def test_single_volume(image, label, net, classes, patch_size=[256, 256], test_save_path=None, case=None, z_spacing=1): + image, label = image.squeeze(0).cpu().detach().numpy(), label.squeeze(0).cpu().detach().numpy() + if len(image.shape) == 3: + prediction = np.zeros_like(label) + for ind in range(image.shape[0]): + slice = image[ind, :, :] + x, y = slice.shape[0], slice.shape[1] + if x != patch_size[0] or y != patch_size[1]: + slice = zoom(slice, (patch_size[0] / x, patch_size[1] / y), order=3) # previous using 0 + input = torch.from_numpy(slice).unsqueeze(0).unsqueeze(0).float().cuda() + net.eval() + with torch.no_grad(): + outputs = net(input) + out = torch.argmax(torch.softmax(outputs, dim=1), dim=1).squeeze(0) + out = out.cpu().detach().numpy() + if x != 
patch_size[0] or y != patch_size[1]: + pred = zoom(out, (x / patch_size[0], y / patch_size[1]), order=0) + else: + pred = out + prediction[ind] = pred + else: + input = torch.from_numpy(image).unsqueeze( + 0).unsqueeze(0).float().cuda() + net.eval() + with torch.no_grad(): + out = torch.argmax(torch.softmax(net(input), dim=1), dim=1).squeeze(0) + prediction = out.cpu().detach().numpy() + metric_list = [] + for i in range(1, classes): + metric_list.append(calculate_metric_percase(prediction == i, label == i)) + + if test_save_path is not None: + img_itk = sitk.GetImageFromArray(image.astype(np.float32)) + prd_itk = sitk.GetImageFromArray(prediction.astype(np.float32)) + lab_itk = sitk.GetImageFromArray(label.astype(np.float32)) + img_itk.SetSpacing((1, 1, z_spacing)) + prd_itk.SetSpacing((1, 1, z_spacing)) + lab_itk.SetSpacing((1, 1, z_spacing)) + sitk.WriteImage(prd_itk, test_save_path + '/'+case + "_pred.nii.gz") + sitk.WriteImage(img_itk, test_save_path + '/'+ case + "_img.nii.gz") + sitk.WriteImage(lab_itk, test_save_path + '/'+ case + "_gt.nii.gz") + return metric_list \ No newline at end of file diff --git a/PuzzleTuning/SSL_structures/UtnetV2/conv_layers.py b/PuzzleTuning/SSL_structures/UtnetV2/conv_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..0f39d6c816e3a2c9b98334a6b8377e6c3251a997 --- /dev/null +++ b/PuzzleTuning/SSL_structures/UtnetV2/conv_layers.py @@ -0,0 +1,358 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from timm.models.layers import trunc_normal_, DropPath +import pdb + +__all__ = [ + 'ConvNormAct', + 'SingleConv', + 'BasicBlock', + 'Bottleneck', + 'DepthwiseSeparableConv', + 'SEBlock', + 'DropPath', + 'MBConv', + 'FusedMBConv', + 'ConvNeXtBlock', + 'LayerNorm' +] + +class ConvNormAct(nn.Module): + """ + Layer grouping a convolution, a normalization layer and an activation function + normalization options include BN and IN + """ + def __init__(self, in_ch, out_ch, kernel_size=3, stride=1, padding=0, + groups=1, dilation=1, bias=False, norm=nn.BatchNorm2d, act=nn.ReLU, preact=False): + + super().__init__() + assert norm in [nn.BatchNorm2d, nn.InstanceNorm2d, True, False] + assert act in [nn.ReLU, nn.ReLU6, nn.GELU, nn.SiLU, True, False] + + self.conv = nn.Conv2d( + in_channels=in_ch, + out_channels=out_ch, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=groups, + dilation=dilation, + bias=bias + ) + if preact: + self.norm = norm(in_ch) if norm else nn.Identity() + else: + self.norm = norm(out_ch) if norm else nn.Identity() + self.act = act() if act else nn.Identity() + self.preact = preact + + def forward(self, x): + + if self.preact: + out = self.conv(self.act(self.norm(x))) # norm relu conv + else: + out = self.act(self.norm(self.conv(x))) # conv norm relu + + return out + +class SingleConv(nn.Module): + def __init__(self, in_ch, out_ch, stride=1, norm=nn.BatchNorm2d, act=nn.ReLU, preact=False): + super().__init__() + assert norm in [nn.BatchNorm2d, nn.InstanceNorm2d, LayerNorm, True, False] + assert act in [nn.ReLU, nn.ReLU6, nn.GELU, nn.SiLU, True, False] + + + self.conv = ConvNormAct(in_ch, out_ch, 3, stride=stride, padding=1, norm=norm, act=act, preact=preact) + + def forward(self, x): + + return self.conv(x) + +
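+# NOTE (editor): a minimal shape check for the two blocks above (illustrative only):
+# conv = ConvNormAct(3, 32, 3, stride=2, padding=1) # conv -> BN -> ReLU
+# y = conv(torch.randn(2, 3, 64, 64)) # -> (2, 32, 32, 32)
+# With preact=True the same module computes conv(act(norm(x))) instead, which is
+# why norm is then built on the input channels (3) rather than the output channels.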
+class BasicBlock(nn.Module): + def __init__(self, in_ch, out_ch, stride=1, norm=nn.BatchNorm2d, act=nn.ReLU, preact=True): + super().__init__() + assert norm in [nn.BatchNorm2d, nn.InstanceNorm2d, True, False] + assert act in [nn.ReLU, nn.ReLU6, nn.GELU, nn.SiLU, True, False] + + self.conv1 = ConvNormAct(in_ch, out_ch, 3, stride=stride, padding=1, norm=norm, act=act, preact=preact) + self.conv2 = ConvNormAct(out_ch, out_ch, 3, stride=1, padding=1, norm=norm, act=act, preact=preact) + + self.shortcut = nn.Sequential() + if stride != 1 or in_ch != out_ch: # if the shapes differ, project the residual through a conv layer + self.shortcut = ConvNormAct(in_ch, out_ch, 3, stride=stride, padding=1, norm=norm, act=act, preact=preact) + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.conv2(out) + + out += self.shortcut(residual) + + return out + +class Bottleneck(nn.Module): + def __init__(self, in_ch, out_ch, stride=1, groups=1, dilation=1, norm=nn.BatchNorm2d, act=nn.ReLU, preact=True): + super().__init__() + assert norm in [nn.BatchNorm2d, nn.InstanceNorm2d, True, False] + assert act in [nn.ReLU, nn.ReLU6, nn.GELU, nn.SiLU, True, False] + self.expansion = 4 + self.conv1 = ConvNormAct(in_ch, out_ch//self.expansion, 1, stride=1, padding=0, norm=norm, act=act, preact=preact) + self.conv2 = ConvNormAct(out_ch//self.expansion, out_ch//self.expansion, 3, stride=stride, padding=1, norm=norm, act=act, groups=groups, dilation=dilation, preact=preact) + + self.conv3 = ConvNormAct(out_ch//self.expansion, out_ch, 1, stride=1, padding=0, norm=norm, act=act, preact=preact) + self.shortcut = nn.Sequential() + if stride != 1 or in_ch != out_ch: + self.shortcut = ConvNormAct(in_ch, out_ch, 3, stride=stride, padding=1, norm=norm, act=act, preact=preact) + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.conv2(out) + out = self.conv3(out) + + out += self.shortcut(residual) + + return out + + + + +class DepthwiseSeparableConv(nn.Module): + def __init__(self, in_ch, out_ch, stride=1, kernel_size=3, padding=1, bias=False): + super().__init__() + self.depthwise = nn.Conv2d( + in_channels=in_ch, + out_channels=in_ch, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=in_ch, + bias=bias + ) + self.pointwise = nn.Conv2d( + in_channels=in_ch, + out_channels=out_ch, + kernel_size=1, + stride=1, + padding=0, + groups=1, + bias=bias + ) + def forward(self, x): + out = self.depthwise(x) + out = self.pointwise(out) + + return out + + +class SEBlock(nn.Module): + def __init__(self, in_ch, ratio=4, act=nn.ReLU): + super().__init__() + + self.squeeze = nn.AdaptiveAvgPool2d(1) + self.excitation = nn.Sequential( + nn.Conv2d(in_ch, in_ch//ratio, kernel_size=1), + act(), + nn.Conv2d(in_ch//ratio, in_ch, kernel_size=1), + nn.Sigmoid() + ) + def forward(self, x): + out = self.squeeze(x) + out = self.excitation(out) + + return x * out + +class DropPath(nn.Module): + """ + Drop connection (stochastic depth) with probability p + """ + def __init__(self, p=0): + super().__init__() + + self.p = p + def forward(self, x): + if (not self.p) or (not self.training): + return x + + batch_size = x.shape[0] + random_tensor = torch.rand(batch_size, 1, 1, 1).to(x.device) + binary_mask = self.p < random_tensor + + x = x.div(1 - self.p) + x = x * binary_mask + + return x +
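+# NOTE (editor): DropPath implements per-sample stochastic depth: with probability p
+# a sample's whole branch output is zeroed, and survivors are scaled by 1/(1-p) so the
+# expectation is unchanged. A quick check (illustrative only):
+# dp = DropPath(p=0.2); dp.train()
+# y = dp(torch.ones(4, 8, 16, 16)) # each sample is either all zeros or all 1/0.8 = 1.25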
+class MBConv(nn.Module): + """ + MBConv with an expansion factor of N, and a squeeze-and-excitation module + """ + def __init__(self, in_ch, out_ch, expansion=4, kernel_size=3, stride=1, ratio=4, p=0, se=True, norm=nn.BatchNorm2d, act=nn.ReLU): + super().__init__() + + + padding = (kernel_size - 1) // 2 + expanded = expansion * in_ch + self.se = se + + self.expand_proj = nn.Identity() if (expansion==1) else ConvNormAct(in_ch, expanded, kernel_size=1, norm=norm, act=act, preact=True) + + self.depthwise = ConvNormAct(expanded, expanded, kernel_size=kernel_size, stride=stride, padding=padding, groups=expanded, act=act, norm=norm, preact=True) + + if self.se: + self.se_block = SEBlock(expanded, ratio=ratio) + + self.pointwise = ConvNormAct(expanded, out_ch, kernel_size=1, padding=0, norm=norm, act=False, preact=True) + + self.drop_path = DropPath(p) + + self.shortcut = nn.Sequential() + if in_ch != out_ch or stride != 1: + self.shortcut = nn.Sequential(ConvNormAct(in_ch, out_ch, kernel_size, stride=stride, padding=padding, norm=False, act=False)) + + def forward(self, x): + residual = x + + x = self.expand_proj(x) + x = self.depthwise(x) + if self.se: + x = self.se_block(x) + x = self.pointwise(x) + + x = self.drop_path(x) + + x = x + self.shortcut(residual) + + return x + +class FusedMBConv(nn.Module): + """ + Fused-MBConv: the expansion and depthwise convolutions of MBConv are fused into a + single regular convolution, with an optional squeeze-and-excitation module + """ + def __init__(self, in_ch, out_ch, expansion=4, kernel_size=3, stride=1, ratio=4, p=0, se=True, norm=nn.BatchNorm2d, act=nn.ReLU): + super().__init__() + + + padding = (kernel_size - 1) // 2 + expanded = expansion * in_ch + + self.stride = stride + self.se = se + + self.conv3x3 = ConvNormAct(in_ch, expanded, kernel_size=kernel_size, stride=stride, padding=padding, groups=1, norm=norm, act=act, preact=True) + + if self.se: + self.se_block = SEBlock(expanded, ratio=ratio) + + self.pointwise = ConvNormAct(expanded, out_ch, kernel_size=1, padding=0, norm=norm, act=False, preact=True) + + self.drop_path = DropPath(p) + + self.shortcut = nn.Sequential() + if in_ch != out_ch or stride != 1: + self.shortcut = nn.Sequential(ConvNormAct(in_ch, out_ch, 3, stride=stride, padding=1, norm=False, act=False)) + + def forward(self, x): + residual = x + + x = self.conv3x3(x) + if self.se: + x = self.se_block(x) + x = self.pointwise(x) + + x = self.drop_path(x) + + x = x + self.shortcut(residual) + + return x + +class ConvNeXtBlock(nn.Module): + r""" ConvNeXt Block. There are two equivalent implementations: + (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W) + (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back + We use (2) as we find it slightly faster in PyTorch + Args: + dim (int): Number of input channels. + drop_path (float): Stochastic depth rate. Default: 0.0 + layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6. + """ + + def __init__(self, dim, out_ch, stride=1, kernel_size=7, norm=None, act=None, preact=None, drop_path=0., layer_scale_init_value=1e-6): + + super().__init__() + padding = kernel_size // 2 + self.dwconv = nn.Conv2d(dim, dim, kernel_size=kernel_size, padding=padding, groups=dim) # depthwise conv + self.norm = LayerNorm(dim, eps=1e-6) + self.pwconv1 = nn.Linear(dim, 4 * dim) # pointwise/1x1 convs, implemented with linear layers + self.act = nn.GELU() + self.pwconv2 = nn.Linear(4 * dim, dim) + self.gamma = nn.Parameter(layer_scale_init_value * torch.ones((dim)), + requires_grad=True) if layer_scale_init_value > 0 else None + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + def forward(self, x): + input = x + x = self.dwconv(x) + x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C) + x = self.norm(x) + x = self.pwconv1(x) + x = self.act(x) + x = self.pwconv2(x) + if self.gamma is not None: + x = self.gamma * x + x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W) + + x = input + self.drop_path(x) + + return x + +class LayerNorm(nn.Module): + + r""" LayerNorm that supports two data formats: channels_last (default) or channels_first. + + The ordering of the dimensions in the inputs. channels_last corresponds to inputs with + + shape (batch_size, height, width, channels) while channels_first corresponds to inputs + + with shape (batch_size, channels, height, width). + + """ + + def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"): + super().__init__() + self.weight = nn.Parameter(torch.ones(normalized_shape)) + self.bias = nn.Parameter(torch.zeros(normalized_shape)) + self.eps = eps + self.data_format = data_format + + if self.data_format not in ["channels_last", "channels_first"]: + raise NotImplementedError + self.normalized_shape = (normalized_shape, ) + + def forward(self, x): + if self.data_format == "channels_last": + return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) + elif self.data_format == "channels_first": + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = self.weight[:, None, None] * x + self.bias[:, None, None] + + return x + + +if __name__ == '__main__': + img = torch.randn(2, 3, 256, 256) + depth_conv = DepthwiseSeparableConv(3, 32) + + out = depth_conv(img) + print(out.shape) + + + + diff --git a/PuzzleTuning/SSL_structures/UtnetV2/trans_layers.py b/PuzzleTuning/SSL_structures/UtnetV2/trans_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..66a7b58a0da49c879ea042657b093716441a76ed --- /dev/null +++ b/PuzzleTuning/SSL_structures/UtnetV2/trans_layers.py @@ -0,0 +1,94 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from einops import rearrange +import pdb + + +__all__ = [ + 'Mlp', + 'Attention', + 'TransformerBlock', +] + +class Mlp(nn.Module): + def __init__(self, in_dim, hid_dim=None, out_dim=None, act=nn.GELU, drop=0.): + super().__init__() + out_dim = out_dim or in_dim + hid_dim = hid_dim or in_dim + self.fc1 = nn.Linear(in_dim, hid_dim) + self.act = act() + self.fc2 = nn.Linear(hid_dim, out_dim) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + + return x + +class PreNorm(nn.Module): + def __init__(self, dim, fn): + super().__init__() + self.norm = nn.LayerNorm(dim) + self.fn = fn + def forward(self, x, **kwargs): + return self.fn(self.norm(x), **kwargs) + + + +class Attention(nn.Module): + def __init__(self, dim, heads, dim_head, attn_drop=0., proj_drop=0.): + super().__init__() + + inner_dim = dim_head * heads + + self.heads = heads + self.scale = dim_head ** -0.5 + + self.to_qkv = nn.Linear(dim, inner_dim*3, bias=False) + + self.to_out = nn.Linear(inner_dim, dim) + + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + # x: B, L, C. 
Batch, sequence length, dim + q, k, v = self.to_qkv(x).chunk(3, dim=-1) + + q, k, v = map(lambda t: rearrange(t, 'b l (heads dim_head) -> b heads l dim_head', heads=self.heads), [q, k, v]) + attn = torch.einsum('bhid,bhjd->bhij', q, k) * self.scale + + attn = F.softmax(attn, dim=-1) + + attned = torch.einsum('bhij,bhjd->bhid', attn, v) + attned = rearrange(attned, 'b heads l dim_head -> b l (dim_head heads)') + + attned = self.to_out(attned) + + return attned + + +class TransformerBlock(nn.Module): + def __init__(self, dim, depth, heads, dim_head, mlp_dim, attn_drop=0., proj_drop=0.): + super().__init__() + + self.layers = nn.ModuleList([]) + + for i in range(depth): + self.layers.append(nn.ModuleList([ + PreNorm(dim, Attention(dim, heads, dim_head, attn_drop, proj_drop)), + PreNorm(dim, Mlp(dim, mlp_dim, dim, drop=proj_drop)) + ])) + def forward(self, x): + + for attn, ffn in self.layers: + x = attn(x) + x + x = ffn(x) + x + + return x + + diff --git a/PuzzleTuning/SSL_structures/UtnetV2/utnetv2.py b/PuzzleTuning/SSL_structures/UtnetV2/utnetv2.py new file mode 100644 index 0000000000000000000000000000000000000000..8c5d8151220dc005f56bc9defbf4b1a451e16f8c --- /dev/null +++ b/PuzzleTuning/SSL_structures/UtnetV2/utnetv2.py @@ -0,0 +1,77 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +# from .utils import get_block +from .utnetv2_utils import Down_block, Up_block, inconv, SemanticMapFusion +import pdb + +from .conv_layers import BasicBlock, Bottleneck, SingleConv, MBConv, FusedMBConv, ConvNeXtBlock + +def get_block(name): + block_map = { + 'SingleConv': SingleConv, + 'BasicBlock': BasicBlock, + 'Bottleneck': Bottleneck, + 'MBConv': MBConv, + 'FusedMBConv': FusedMBConv, + 'ConvNeXtBlock': ConvNeXtBlock + } + return block_map[name] + + +class UTNetV2(nn.Module): + + def __init__(self, in_chan, num_classes, base_chan=32, map_size=8, conv_block='BasicBlock', conv_num=[2,1,0,0, 0,1,2,2], trans_num=[0,1,2,2, 2,1,0,0], num_heads=[1,4,8,16, 8,4,1,1], fusion_depth=2, fusion_dim=512, fusion_heads=16, expansion=4, attn_drop=0., proj_drop=0., proj_type='depthwise', norm=nn.BatchNorm2d, act=nn.GELU): + super().__init__() + + + chan_num = [2*base_chan, 4*base_chan, 8*base_chan, 16*base_chan, + 8*base_chan, 4*base_chan, 2*base_chan, base_chan] # [64, 128, 256, 512, 256, 128, 64, 32] + dim_head = [chan_num[i]//num_heads[i] for i in range(8)] # [64, 32, 32, 32, 32, 32, 64, 32] + conv_block = get_block(conv_block) # BasicBlock + + # self.inc and self.down1 forms the conv stem + self.inc = inconv(in_chan, base_chan, norm=norm, act=act) + self.down1 = Down_block(base_chan, chan_num[0], conv_num[0], trans_num[0], conv_block, norm=norm, act=act, map_generate=False, map_proj=False) + # self.down1 = down_block(32, 64, 2, 0, basicblock, batchnorm, gelu, False, False) + + # down2 down3 down4 apply the B-MHA blocks + self.down2 = Down_block(chan_num[0], chan_num[1], conv_num[1], trans_num[1], conv_block, heads=num_heads[1], dim_head=dim_head[1], expansion=expansion, attn_drop=attn_drop, proj_drop=proj_drop, map_size=map_size, proj_type=proj_type, norm=norm, act=act, map_generate=True, map_proj=False) + self.down3 = Down_block(chan_num[1], chan_num[2], conv_num[2], trans_num[2], conv_block, heads=num_heads[2], dim_head=dim_head[2], expansion=expansion, attn_drop=attn_drop, proj_drop=proj_drop, map_size=map_size, proj_type=proj_type, norm=norm, act=act, map_generate=False, map_proj=True) + self.down4 = Down_block(chan_num[2], chan_num[3], conv_num[3], trans_num[3], conv_block, 
heads=num_heads[3], dim_head=dim_head[3], expansion=expansion, attn_drop=attn_drop, proj_drop=proj_drop, map_size=map_size, proj_type=proj_type, norm=norm, act=act, map_generate=False, map_proj=True) + + + self.map_fusion = SemanticMapFusion(chan_num[1:4], fusion_dim, fusion_heads, depth=fusion_depth, norm=norm) + + + self.up1 = Up_block(chan_num[3], chan_num[4], conv_num[4], trans_num[4], conv_block, heads=num_heads[4], dim_head=dim_head[4], expansion=expansion, attn_drop=attn_drop, proj_drop=proj_drop, map_size=map_size, proj_type=proj_type, norm=norm, act=act, map_shortcut=True) + self.up2 = Up_block(chan_num[4], chan_num[5], conv_num[5], trans_num[5], conv_block, heads=num_heads[5], dim_head=dim_head[5], expansion=expansion, attn_drop=attn_drop, proj_drop=proj_drop, map_size=map_size, proj_type=proj_type, norm=norm, act=act, map_shortcut=True) + + # up3 up4 form the conv decoder + self.up3 = Up_block(chan_num[5], chan_num[6], conv_num[6], trans_num[6], conv_block, norm=norm, act=act, map_shortcut=False) + self.up4 = Up_block(chan_num[6], chan_num[7], conv_num[7], trans_num[7], conv_block, norm=norm, act=act, map_shortcut=False) + + + self.outc = nn.Conv2d(chan_num[7], num_classes, kernel_size=1) + + def forward(self, x): + # print('x: ', x.shape) + x0 = self.inc(x) # (3, 480, 480) -> (32, 480, 480) + x1, _ = self.down1(x0) + x2, map2 = self.down2(x1, None) + x3, map3 = self.down3(x2, map2) + x4, map4 = self.down4(x3, map3) + + map_list = [map2, map3, map4] + map_list = self.map_fusion(map_list) + + out, semantic_map = self.up1(x4, x3, map_list[2], map_list[1]) + out, semantic_map = self.up2(out, x2, semantic_map, map_list[0]) + out, semantic_map = self.up3(out, x1, semantic_map, None) + out, semantic_map = self.up4(out, x0, semantic_map, None) + + out = self.outc(out) + + return out + diff --git a/PuzzleTuning/SSL_structures/UtnetV2/utnetv2_utils.py b/PuzzleTuning/SSL_structures/UtnetV2/utnetv2_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9f35a7d85ce7544a76b9472048165a6b40b20664 --- /dev/null +++ b/PuzzleTuning/SSL_structures/UtnetV2/utnetv2_utils.py @@ -0,0 +1,362 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from .conv_layers import DepthwiseSeparableConv, BasicBlock, Bottleneck, MBConv, FusedMBConv, ConvNormAct +from .trans_layers import TransformerBlock + +from einops import rearrange +import pdb + + +class BidirectionAttention(nn.Module): + def __init__(self, feat_dim, map_dim, out_dim, heads=4, dim_head=64, attn_drop=0., + proj_drop=0., map_size=16, proj_type='depthwise'): + super().__init__() + + self.inner_dim = dim_head * heads + self.feat_dim = feat_dim + self.map_dim = map_dim + self.heads = heads + self.scale = dim_head ** (-0.5) + self.dim_head = dim_head + self.map_size = map_size + + assert proj_type in ['linear', 'depthwise'] + + if proj_type == 'linear': + self.feat_qv = nn.Conv2d(feat_dim, self.inner_dim*2, kernel_size=1, bias=False) + self.feat_out = nn.Conv2d(self.inner_dim, out_dim, kernel_size=1, bias=False) + + else: + self.feat_qv = DepthwiseSeparableConv(feat_dim, self.inner_dim * 2) + self.feat_out = DepthwiseSeparableConv(self.inner_dim, out_dim) + + self.map_qv = nn.Conv2d(map_dim, self.inner_dim*2, kernel_size=1, bias=False) + self.map_out = nn.Conv2d(self.inner_dim, map_dim, kernel_size=1, bias=False) + + self.attn_drop = nn.Dropout(attn_drop) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, feat, semantic_map): + + B, C, H, W = feat.shape + + feat_q, feat_v = 
self.feat_qv(feat).chunk(2, dim=1) # B, inner_dim, H, W + map_q, map_v = self.map_qv(semantic_map).chunk(2, dim=1) # B, inner_dim, rs, rs + + feat_q, feat_v = map(lambda t: rearrange(t, 'b (dim_head heads) h w -> b heads (h w) dim_head', dim_head = self.dim_head, heads=self.heads, h=H, w=W), [feat_q, feat_v]) + map_q, map_v = map(lambda t: rearrange(t, 'b (dim_head heads) h w -> b heads (h w) dim_head', dim_head=self.dim_head, heads=self.heads, h=self.map_size, w=self.map_size), [map_q, map_v]) + + attn = torch.einsum('bhid,bhjd->bhij', feat_q, map_q) + attn *= self.scale + + feat_map_attn = F.softmax(attn, dim=-1) # the semantic map is concise, so dropout is not needed here + # adding dropout might destabilize training + map_feat_attn = self.attn_drop(F.softmax(attn, dim=-2)) + + feat_out = torch.einsum('bhij,bhjd->bhid', feat_map_attn, map_v) + feat_out = rearrange(feat_out, 'b heads (h w) dim_head -> b (dim_head heads) h w', h=H, w=W, dim_head=self.dim_head, heads=self.heads) + + map_out = torch.einsum('bhji,bhjd->bhid', map_feat_attn, feat_v) + map_out = rearrange(map_out, 'b heads (h w) dim_head -> b (dim_head heads) h w', b=B, dim_head=self.dim_head, heads=self.heads, h=self.map_size, w=self.map_size) + + feat_out = self.proj_drop(self.feat_out(feat_out)) + map_out = self.proj_drop(self.map_out(map_out)) + + return feat_out, map_out + + +class BidirectionAttentionBlock(nn.Module): + def __init__(self, feat_dim, map_dim, out_dim, heads, dim_head, norm=nn.BatchNorm2d, + act=nn.GELU, expansion=4, attn_drop=0., proj_drop=0., map_size=8, + proj_type='depthwise'): + super().__init__() + + assert norm in [nn.BatchNorm2d, nn.InstanceNorm2d, True, False] + assert act in [nn.ReLU, nn.ReLU6, nn.GELU, nn.SiLU, True, False] + assert proj_type in ['linear', 'depthwise'] + + self.norm1 = norm(feat_dim) if norm else nn.Identity() # norm layer for feature map + self.norm2 = norm(map_dim) if norm else nn.Identity() # norm layer for semantic map + + + self.attn = BidirectionAttention(feat_dim, map_dim, out_dim, heads=heads, dim_head=dim_head, attn_drop=attn_drop, proj_drop=proj_drop, map_size=map_size, proj_type=proj_type) + + self.shortcut = nn.Sequential() + if feat_dim != out_dim: + self.shortcut = ConvNormAct(feat_dim, out_dim, kernel_size=1, padding=0, norm=norm, act=act, preact=True) + + + if proj_type == 'linear': + self.feedforward = FusedMBConv(out_dim, out_dim, expansion=expansion, kernel_size=1, act=act, norm=norm) # 2 conv1x1 + else: + self.feedforward = MBConv(out_dim, out_dim, expansion=expansion, kernel_size=3, act=act, norm=norm, p=proj_drop) # depthwise conv + + def forward(self, x, semantic_map): + + feat = self.norm1(x) + mapp = self.norm2(semantic_map) + + out, mapp = self.attn(feat, mapp) + + out += self.shortcut(x) + out = self.feedforward(out) + + mapp += semantic_map + + return out, mapp +
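+# NOTE (editor): in BidirectionAttention above, attn has shape
+# (B, heads, H*W, map_size*map_size). Softmax over dim=-1 lets each pixel attend to
+# the semantic-map tokens (producing feat_out), while softmax over dim=-2 lets each
+# map token attend back to the pixels (producing map_out) — one attention matrix,
+# read in two directions.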
+class PatchMerging(nn.Module): + """ + Modified patch merging layer that works as down-sampling + """ + + def __init__(self, dim, out_dim, norm=nn.BatchNorm2d, proj_type='depthwise', map_proj=True): + super().__init__() + self.dim = dim # 32 + if proj_type == 'linear': + self.reduction = nn.Conv2d(4*dim, out_dim, kernel_size=1, bias=False) + else: + self.reduction = DepthwiseSeparableConv(4*dim, out_dim) # (32*4, 64) + + self.norm = norm(4*dim) + + if map_proj: + self.map_projection = nn.Conv2d(dim, out_dim, kernel_size=1, bias=False) + # (32, 64, kernel_size, bias) + + def forward(self, x, semantic_map=None): + """ + x: B, C, H, W + """ + x0 = x[:, :, 0::2, 0::2] + x1 = x[:, :, 1::2, 0::2] + x2 = x[:, :, 0::2, 1::2] + x3 = x[:, :, 1::2, 1::2] + + x = torch.cat([x0, x1, x2, x3], 1) # B, 4C, H/2, W/2 + + x = self.norm(x) + x = self.reduction(x) # depthwise + pointwise 4C -> outdim + + if semantic_map is not None: + semantic_map = self.map_projection(semantic_map) # dim -> outdim + + return x, semantic_map + +class BasicLayer(nn.Module): + """ + A basic transformer layer for one stage + No downsample or upsample operation in this layer; they are wrapped in the down_block or up_block of UTNet + """ + + def __init__(self, feat_dim, map_dim, out_dim, num_blocks, heads=4, dim_head=64, expansion=1, attn_drop=0., proj_drop=0., map_size=8, proj_type='depthwise', norm=nn.BatchNorm2d, act=nn.GELU): + super().__init__() + + dim1 = feat_dim + dim2 = out_dim + + self.blocks = nn.ModuleList([]) + for i in range(num_blocks): + self.blocks.append(BidirectionAttentionBlock(dim1, map_dim, dim2, heads, dim_head, expansion=expansion, attn_drop=attn_drop, proj_drop=proj_drop, map_size=map_size, proj_type=proj_type, norm=norm, act=act)) + dim1 = out_dim + + def forward(self, x, semantic_map): + for block in self.blocks: + x, semantic_map = block(x, semantic_map) + + return x, semantic_map + +class SemanticMapGeneration(nn.Module): + def __init__(self, feat_dim, map_dim, map_size): # (64, 64, 8) + super().__init__() + + self.map_size = map_size # 8 + self.map_dim = map_dim # 64 + + self.map_code_num = map_size * map_size # 8*8=64 + + self.base_proj = nn.Conv2d(feat_dim, map_dim, kernel_size=3, padding=1, bias=False) + # (64, 64, 3, 1, false) + self.semantic_proj = nn.Conv2d(feat_dim, self.map_code_num, kernel_size=3, padding=1, bias=False) + # (64, 64, 3, 1, false) + + + + def forward(self, x): + B, C, H, W = x.shape # B, C, H, W + feat = self.base_proj(x) # B, map_dim, h, w + weight_map = self.semantic_proj(x) # B, map_code_num, h, w + + weight_map = weight_map.view(B, self.map_code_num, -1) + weight_map = F.softmax(weight_map, dim=2) # B, map_code_num, hw + feat = feat.view(B, self.map_dim, -1) # B, map_dim, hw + + semantic_map = torch.einsum('bij,bkj->bik', feat, weight_map) + + return semantic_map.view(B, self.map_dim, self.map_size, self.map_size) +
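+# NOTE (editor): SemanticMapGeneration pools a (B, C, H, W) feature map into a compact
+# (B, map_dim, map_size, map_size) semantic map: semantic_proj predicts
+# map_size*map_size soft spatial-assignment masks (softmax over the h*w positions),
+# and the einsum 'bij,bkj->bik' takes a mask-weighted average of the projected
+# features for each map token.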
+class SemanticMapFusion(nn.Module): + def __init__(self, in_dim_list, dim, heads, depth=1, norm=nn.BatchNorm2d): + super().__init__() + + + self.dim = dim + + # project all maps to the same channel num + self.in_proj = nn.ModuleList([]) + for i in range(len(in_dim_list)): + self.in_proj.append(nn.Conv2d(in_dim_list[i], dim, kernel_size=1, bias=False)) + + self.fusion = TransformerBlock(dim, depth, heads, dim//heads, dim, attn_drop=0., proj_drop=0.) + + # project all maps back to their original channel num + self.out_proj = nn.ModuleList([]) + for i in range(len(in_dim_list)): + self.out_proj.append(nn.Conv2d(dim, in_dim_list[i], kernel_size=1, bias=False)) + + + + def forward(self, map_list): + B, _, H, W = map_list[0].shape + proj_maps = [self.in_proj[i](map_list[i]).view(B, self.dim, -1).permute(0, 2, 1) for i in range(len(map_list))] + # B, L, C where L=HW + + proj_maps = torch.cat(proj_maps, dim=1) + + attned_maps = self.fusion(proj_maps) + + attned_maps = attned_maps.chunk(len(map_list), dim=1) + + maps_out = [self.out_proj[i](attned_maps[i].permute(0, 2, 1).view(B, self.dim, H, W)) for i in range(len(map_list))] + + return maps_out + + +####################################################################### +# UTNet blocks for one stage, each combining conv blocks and trans blocks + + +class inconv(nn.Module): + def __init__(self, in_ch, out_ch, block=BasicBlock, norm=nn.BatchNorm2d, act=nn.GELU): + super().__init__() + self.conv1 = nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1, bias=False) + + self.conv2 = block(out_ch, out_ch, norm=norm, act=act) + + def forward(self, x): + if len(x.shape) == 5: # squeeze out an extra leading dim of a 5-D input + x = x.squeeze(1) + out = self.conv1(x) # (3, 480, 480) -> (32, 480, 480) + out = self.conv2(out) # block (32, 32, norm, act): conv-norm-relu with a residual connection + + return out + + + + +class Down_block(nn.Module): + def __init__(self, in_ch, out_ch, conv_num, trans_num, conv_block=BasicBlock, + heads=4, dim_head=64, expansion=4, attn_drop=0., proj_drop=0., map_size=8, + proj_type='depthwise', norm=nn.BatchNorm2d, act=nn.GELU, map_generate=False, + map_proj=True, map_dim=None): + # (32, 64, 2, 0, basicblock, batchnorm, gelu, False, False) + super().__init__() + + map_dim = out_ch if map_dim is None else map_dim # 64 + self.map_generate = map_generate # False + if map_generate: + self.map_gen = SemanticMapGeneration(out_ch, map_dim, map_size) + # return semantic_map.view(B, self.map_dim, self.map_size, self.map_size) + + self.patch_merging = PatchMerging(in_ch, out_ch, proj_type=proj_type, norm=norm, map_proj=map_proj) + # in_ch->out_ch + block_list = [] + for i in range(conv_num): # 2 + block_list.append(conv_block(out_ch, out_ch, norm=norm, act=act)) + dim1 = out_ch + + self.conv_blocks = nn.Sequential(*block_list) + + self.trans_blocks = BasicLayer(out_ch, map_dim, out_ch, num_blocks=trans_num, \ + heads=heads, dim_head=dim_head, norm=norm, act=act, expansion=expansion,\ + attn_drop=attn_drop, proj_drop=proj_drop, map_size=map_size, proj_type=proj_type) + + + def forward(self, x, semantic_map=None): + + x, semantic_map = self.patch_merging(x, semantic_map) # in_ch->out_chan + + out = self.conv_blocks(x) # out->out + if self.map_generate: + semantic_map = self.map_gen(out) # (B, self.map_dim, self.map_size, self.map_size) + + out, semantic_map = self.trans_blocks(out, semantic_map) + + return out, semantic_map +
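+# NOTE (editor): a rough shape walk-through for Down_block under the settings used by
+# UTNetV2.down2 (in_ch=64, out_ch=128, map_size=8, map_generate=True):
+# x: (B, 64, H, W) --patch_merging--> (B, 128, H/2, W/2) --conv_blocks--> same shape
+# --map_gen--> semantic_map: (B, 128, 8, 8) --trans_blocks--> both shapes preserved.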
+class Up_block(nn.Module): + def __init__(self, in_ch, out_ch, conv_num, trans_num, conv_block=BasicBlock, + heads=4, dim_head=64, expansion=1, attn_drop=0., proj_drop=0., map_size=8, + proj_type='linear', norm=nn.BatchNorm2d, act=nn.GELU, map_dim=None, + map_shortcut=False): + super().__init__() + + self.reduction = nn.Conv2d(in_ch+out_ch, out_ch, kernel_size=1, padding=0, bias=False) + self.norm = norm(in_ch+out_ch) + + self.map_shortcut = map_shortcut + map_dim = out_ch if map_dim is None else map_dim + if map_shortcut: + self.map_reduction = nn.Conv2d(in_ch+out_ch, map_dim, kernel_size=1, bias=False) + else: + self.map_reduction = nn.Conv2d(in_ch, map_dim, kernel_size=1, bias=False) + + + + self.trans_blocks = BasicLayer(out_ch, map_dim, out_ch, num_blocks=trans_num, \ + heads=heads, dim_head=dim_head, norm=norm, act=act, expansion=expansion,\ + attn_drop=attn_drop, proj_drop=proj_drop, map_size=map_size, proj_type=proj_type) + + conv_list = [] + for i in range(conv_num): + conv_list.append(conv_block(out_ch, out_ch, norm=norm, act=act)) + + self.conv_blocks = nn.Sequential(*conv_list) + + def forward(self, x1, x2, map1, map2=None): + # x1: low-res feature, x2: high-res feature + # map1: semantic map from the previous low-res layer + # map2: semantic map from the encoder shortcut path; may be None if the encoder does not provide one + + + x1 = F.interpolate(x1, size=x2.shape[-2:], mode='bilinear', align_corners=True) + feat = torch.cat([x1, x2], dim=1) + + out = self.reduction(self.norm(feat)) + + if self.map_shortcut and map2 is not None: + semantic_map = torch.cat([map1, map2], dim=1) + else: + semantic_map = map1 + semantic_map = self.map_reduction(semantic_map) + + out, semantic_map = self.trans_blocks(out, semantic_map) + out = self.conv_blocks(out) + + return out, semantic_map + + diff --git a/PuzzleTuning/SSL_structures/engine_pretrain.py b/PuzzleTuning/SSL_structures/engine_pretrain.py new file mode 100644 index 0000000000000000000000000000000000000000..a0fba243d2cbbac664004b2029c58349a5e42773 --- /dev/null +++ b/PuzzleTuning/SSL_structures/engine_pretrain.py @@ -0,0 +1,157 @@ +""" +Training Engine Script ver: Feb 8th 16:00 + +Based on MAE code. +https://github.com/facebookresearch/mae + +""" + +import math +import sys +from typing import Iterable +import os +import torch +from torchvision.transforms import ToPILImage +import SSL_structures.misc as misc +import utils.schedulers as lr_sched +from utils.visual_usage import unpatchify, patchify, Draw_tri_fig + + +def train_one_epoch(model: torch.nn.Module, + data_loader: Iterable, optimizer: torch.optim.Optimizer, + device: torch.device, epoch: int, loss_scaler, fix_position_ratio_scheduler=None, + puzzle_patch_size_scheduler=None, check_samples=1, print_freq=20, log_writer=None, args=None): + model.train(True) + + # update logger + metric_logger = misc.MetricLogger(delimiter=" ") + # initialize the learning-rate meter + metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}')) + + header = 'Epoch: [{}]'.format(epoch) + + accum_iter = args.accum_iter + + optimizer.zero_grad() + + if log_writer is not None: # Tensorboard PATH + print('log_dir: {}'.format(args.log_dir)) + + # Iteration + for data_iter_step, (samples, _) in enumerate(metric_logger.log_every(data_loader, print_freq, header)): + + # per-iteration lr schedule based on the fractional epoch position + # (data_iter_step / len(data_loader) + epoch) for finer-grained learning-rate adjustment + if data_iter_step % accum_iter == 0: + lr_sched.adjust_learning_rate(optimizer, data_iter_step / len(data_loader) + epoch, args) + + # fetch the data + samples = samples.to(device, non_blocking=True)
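+ # NOTE (editor): fix_position_ratio_scheduler and puzzle_patch_size_scheduler are
+ # callables mapping the current epoch to a curriculum value; they are built in the
+ # calling script (see utils/schedulers.py, not shown here), e.g. conceptually:
+ # fix_position_ratio = fix_position_ratio_scheduler(epoch) # illustrative only
+ # puzzle_patch_size = puzzle_patch_size_scheduler(epoch) # illustrative only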
+    # Iteration
+    for data_iter_step, (samples, _) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
+
+        # per-iteration LR scheduling uses the fractional epoch position
+        # (data_iter_step / len(data_loader) + epoch) for finer-grained adjustment
+        if data_iter_step % accum_iter == 0:
+            lr_sched.adjust_learning_rate(optimizer, data_iter_step / len(data_loader) + epoch, args)
+
+        # fetch data
+        samples = samples.to(device, non_blocking=True)
+
+        with torch.cuda.amp.autocast():  # automatic mixed precision to speed up training
+
+            if fix_position_ratio_scheduler is not None and puzzle_patch_size_scheduler is not None:  # SAE
+                fix_position_ratio = fix_position_ratio_scheduler(epoch)
+                puzzle_patch_size = puzzle_patch_size_scheduler(epoch)
+            else:
+                fix_position_ratio = None
+                puzzle_patch_size = None
+
+            if args.model[0:3] == 'sae':
+                loss, pred, imgs_puzzled_patches = model(samples, fix_position_ratio=fix_position_ratio,
+                                                         puzzle_patch_size=puzzle_patch_size)  # SAE
+            else:  # args.model[0:3] == 'mae'
+                loss, pred, mask_patch_indicators = model(samples, mask_ratio=args.mask_ratio)  # MAE
+                # FIXME the MAE curriculum may not be good enough for future use
+
+        if args.DDP_distributed:
+            loss_value = loss.item()
+        else:
+            loss_value = float(loss.cpu().detach().numpy()) \
+                if torch.cuda.device_count() == 1 else sum(loss.cpu().detach().numpy())
+
+        if not math.isfinite(loss_value):  # guard against loss explosion
+            print("Loss is {}, stopping training".format(loss_value))
+            sys.exit(1)
+
+        # per-minibatch loss: with gradient accumulation, scale it down so the
+        # contributions summed up inside loss_scaler stay correctly weighted
+        loss = loss / accum_iter
+
+        # backward core: loss_scaler functionally wraps loss.backward() + optimizer.step(),
+        # and adds gradient clipping on top
+        loss_scaler(loss, optimizer, parameters=model.parameters(),
+                    update_grad=(data_iter_step + 1) % accum_iter == 0)
+
+        if (data_iter_step + 1) % accum_iter == 0:
+            optimizer.zero_grad()
+
+        torch.cuda.synchronize()  # wait until all kernels in all streams on the current device finish
+
+        # update the records
+        metric_logger.update(loss=loss_value)
+        lr = optimizer.param_groups[0]["lr"]
+        metric_logger.update(lr=lr)
+
+        # average the loss over processes (per-GPU mean)
+        loss_value_reduce = misc.all_reduce_mean(loss_value)
+
+        if log_writer is not None:
+            log_writer.add_scalar('train_loss', loss_value_reduce, epoch)
+            log_writer.add_scalar('lr', lr, epoch)
+
+            if fix_position_ratio is not None and puzzle_patch_size is not None:
+                log_writer.add_scalar('puzzle_patch_size', puzzle_patch_size, epoch)
+                log_writer.add_scalar('fix_position_ratio', fix_position_ratio, epoch)
+
+    # gather the stats from all processes
+    metric_logger.synchronize_between_processes()
+    if fix_position_ratio is not None and puzzle_patch_size is not None:
+        print("Averaged stats:", metric_logger, 'fix_position_ratio:', fix_position_ratio,
+              ' puzzle_patch_size:', puzzle_patch_size)
+    else:
+        print("Averaged stats:", metric_logger)
+
+    # TODO: currently, figures are only painted at the end of each training epoch
+    if args.model[0:3] == 'sae':
+        imgs_puzzled_batch = unpatchify(imgs_puzzled_patches, patch_size=16)
+    else:  # MAE
+        sample_img_patches = patchify(samples, patch_size=16)  # on GPU
+        masked_img_patches = sample_img_patches * \
+                             mask_patch_indicators.unsqueeze(-1).expand(-1, -1,
+                                                                        sample_img_patches.shape[-1])
+        masked_img_batch = unpatchify(masked_img_patches, patch_size=16)
+
+    # paint images at the end of each epoch on the main process
+    if misc.is_main_process():
+        for sampleIDX in range(check_samples):
+
+            sample_img = samples.cpu()[sampleIDX]
+            sample_img = ToPILImage()(sample_img)
+            sample_img.save(os.path.join(args.output_dir, 'figs', 'sample_e_' + str(epoch) +
+                                         '_sampleIDX_' + str(sampleIDX) + '.jpg'))
+
+            recons_img_batch = unpatchify(pred, patch_size=16)
+            recons_img = recons_img_batch.cpu()[sampleIDX]
+            recons_img = ToPILImage()(recons_img)
+            recons_img.save(os.path.join(args.output_dir, 'figs', 'recons_e_' + str(epoch) +
+                                         '_sampleIDX_' + str(sampleIDX) + '.jpg'))
+
+            if args.model[0:3] == 'sae':  # SAE
+                puzzled_img = imgs_puzzled_batch.cpu()[sampleIDX]
+                puzzled_img = ToPILImage()(puzzled_img)
+                puzzled_img.save(os.path.join(args.output_dir, 'figs', 'puzzled_e_' + str(epoch) +
+                                              '_sampleIDX_' + str(sampleIDX) + '.jpg'))
+
+                picpath = os.path.join(args.output_dir, 'figs', 'puzzled_e_' + str(epoch) +
+                                       '_sampleIDX_' + str(sampleIDX) + '.jpg')
+                Draw_tri_fig(sample_img, puzzled_img, recons_img, picpath)
+
+            else:  # MAE
+                masked_img = masked_img_batch.cpu()[sampleIDX]  # put on CPU
+                masked_img = ToPILImage()(masked_img)
+                masked_img.save(os.path.join(args.output_dir, 'figs', 'masked_e_' + str(epoch) +
+                                             '_sampleIDX_' + str(sampleIDX) + '.jpg'))
+
+                picpath = os.path.join(args.output_dir, 'figs', 'masked_e_' + str(epoch) +
+                                       '_sampleIDX_' + str(sampleIDX) + '.jpg')
+                Draw_tri_fig(sample_img, masked_img, recons_img, picpath)
+
+    # return the records; everything else is already accumulated inside the logger
+    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
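+
+# Usage sketch (hypothetical wiring; see the PuzzleTuning training scripts for
+# the actual entry point): one pre-training epoch is driven as
+#   stats = train_one_epoch(model, data_loader, optimizer, device, epoch,
+#                           loss_scaler, fix_position_ratio_scheduler,
+#                           puzzle_patch_size_scheduler, args=args)
+# where loss_scaler is misc.NativeScalerWithGradNormCount and the two schedulers
+# are only passed for the SAE ('sae*') models.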
diff --git a/PuzzleTuning/SSL_structures/misc.py b/PuzzleTuning/SSL_structures/misc.py
new file mode 100644
index 0000000000000000000000000000000000000000..772e2fe06d85f06b610c8636df1e157434b07f66
--- /dev/null
+++ b/PuzzleTuning/SSL_structures/misc.py
@@ -0,0 +1,403 @@
+"""
+pre-training funcs  Script ver: Feb 8th 16:00
+loss backward is modified (see NativeScalerWithGradNormCount)
+
+"""
+import builtins
+import datetime
+import os
+import time
+from collections import defaultdict, deque
+from pathlib import Path
+
+import torch
+import torch.distributed as dist
+
+try:
+    from torch import inf
+except ImportError:  # older torch versions
+    from torch._six import inf
+
+
+# SmoothedValue operator
+class SmoothedValue(object):
+    """Track a series of values and provide access to smoothed values over a
+    window or the global series average.
+    """
+
+    def __init__(self, window_size=20, fmt=None):
+        if fmt is None:
+            fmt = "{median:.4f} ({global_avg:.4f})"
+        self.deque = deque(maxlen=window_size)
+        self.total = 0.0
+        self.count = 0
+        self.fmt = fmt
+
+    def update(self, value, n=1):
+        self.deque.append(value)
+        self.count += n
+        self.total += value * n
+
+    def synchronize_between_processes(self):
+        """
+        Warning: does not synchronize the deque!
+        """
+        if not is_dist_avail_and_initialized():
+            return
+        t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
+        dist.barrier()
+        dist.all_reduce(t)
+        t = t.tolist()
+        self.count = int(t[0])
+        self.total = t[1]
+
+    @property
+    def median(self):
+        d = torch.tensor(list(self.deque))
+        return d.median().item()
+
+    @property
+    def avg(self):
+        d = torch.tensor(list(self.deque), dtype=torch.float32)
+        return d.mean().item()
+
+    @property
+    def global_avg(self):
+        return self.total / self.count
+
+    @property
+    def max(self):
+        return max(self.deque)
+
+    @property
+    def value(self):
+        return self.deque[-1]
+
+    def __str__(self):
+        return self.fmt.format(
+            median=self.median,
+            avg=self.avg,
+            global_avg=self.global_avg,
+            max=self.max,
+            value=self.value)
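+
+# Minimal sketch of SmoothedValue behaviour (illustrative values):
+#   v = SmoothedValue(window_size=2)
+#   v.update(1.0); v.update(3.0); v.update(5.0)
+#   v.median -> 4.0 (the window only keeps [3.0, 5.0])
+#   v.global_avg -> 3.0 (total 9.0 over 3 updates)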
+
+
+class MetricLogger(object):
+    def __init__(self, delimiter="\t"):
+        self.meters = defaultdict(SmoothedValue)  # SmoothedValue operator
+        self.delimiter = delimiter
+
+    def update(self, **kwargs):  # update the metric dict
+        for k, v in kwargs.items():
+            if v is None:
+                continue
+            if isinstance(v, torch.Tensor):
+                v = v.item()
+            assert isinstance(v, (float, int))
+            self.meters[k].update(v)
+
+    def __getattr__(self, attr):  # raise for unknown attributes
+        if attr in self.meters:
+            return self.meters[attr]
+        if attr in self.__dict__:
+            return self.__dict__[attr]
+        raise AttributeError("'{}' object has no attribute '{}'".format(
+            type(self).__name__, attr))
+
+    def __str__(self):  # convert to str for print
+        loss_str = []
+        for name, meter in self.meters.items():
+            loss_str.append(
+                "{}: {}".format(name, str(meter))
+            )
+        return self.delimiter.join(loss_str)
+
+    def synchronize_between_processes(self):  # synchronize across processes
+        for meter in self.meters.values():
+            meter.synchronize_between_processes()
+
+    def add_meter(self, name, meter):  # register a new meter
+        self.meters[name] = meter
+
+    def log_every(self, iterable, print_freq, header=None):  # wrap the minibatch iterator
+        # init the iteration idx
+        i = 0
+        # init the header
+        if not header:
+            header = ''
+
+        # init the timers
+        start_time = time.time()
+        end = time.time()
+        iter_time = SmoothedValue(fmt='{avg:.4f}')
+        data_time = SmoothedValue(fmt='{avg:.4f}')
+        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
+        # init the output format
+        log_msg = [
+            header,
+            '[{0' + space_fmt + '}/{1}]',
+            'eta: {eta}',
+            '{meters}',
+            'time: {time}',
+            'data: {data}'
+        ]
+        if torch.cuda.is_available():
+            log_msg.append('max mem: {memory:.0f}')
+
+        log_msg = self.delimiter.join(log_msg)  # join with the delimiter
+
+        MB = 1024.0 * 1024.0
+
+        for obj in iterable:
+
+            data_time.update(time.time() - end)
+            yield obj  # yield the next minibatch to the caller
+            iter_time.update(time.time() - end)
+
+            if i % print_freq == 0 or i == len(iterable) - 1:
+                # estimate the remaining time
+                eta_seconds = iter_time.global_avg * (len(iterable) - i)
+                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
+                # print the progress line
+                if torch.cuda.is_available():
+                    print(log_msg.format(
+                        i, len(iterable), eta=eta_string,
+                        meters=str(self),
+                        time=str(iter_time), data=str(data_time),
+                        memory=torch.cuda.max_memory_allocated() / MB))
+                else:
+                    print(log_msg.format(
+                        i, len(iterable), eta=eta_string,
+                        meters=str(self),
+                        time=str(iter_time), data=str(data_time)))
+            i += 1
+            end = time.time()
+
+        total_time = time.time() - start_time
+        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
+        print('{} Total time: {} ({:.4f} s / it)'.format(
+            header, total_time_str, total_time / len(iterable)))
+
+
+def setup_for_distributed(is_master):
+    """
+    This function disables printing when not in the master process
+    """
+    builtin_print = builtins.print
+
+    def print(*args, **kwargs):
+        force = kwargs.pop('force', False)
+        force = force or (get_world_size() > 8)
+        if is_master or force:
+            now = datetime.datetime.now().time()
+            builtin_print('[{}] '.format(now), end='')  # print with a time stamp
+            builtin_print(*args, **kwargs)
+
+    builtins.print = print
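+
+# Sketch of the logging pattern used by the training engine (assumed typical use):
+#   logger = MetricLogger(delimiter="  ")
+#   for samples, targets in logger.log_every(loader, print_freq=20, header='Epoch: [0]'):
+#       ...  # one training step
+#       logger.update(loss=loss_value, lr=lr)
+# log_every yields the minibatches unchanged and prints ETA / iter time / meter
+# values every print_freq iterations.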
+
+
+def is_dist_avail_and_initialized():
+    if not dist.is_available():
+        return False
+    if not dist.is_initialized():
+        return False
+    return True
+
+
+def get_world_size():
+    if not is_dist_avail_and_initialized():
+        return 1
+    return dist.get_world_size()
+
+
+def get_rank():
+    if not is_dist_avail_and_initialized():
+        return 0
+    return dist.get_rank()
+
+
+def is_main_process():
+    return get_rank() == 0
+
+
+def save_on_master(*args, **kwargs):
+    if is_main_process():
+        torch.save(*args, **kwargs)
+
+
+def init_distributed_mode(args):
+    """
+    Configure the multi-node environment variables and set args.DDP_distributed accordingly.
+
+    :param args:
+    :return:
+    """
+    if args.dist_on_itp:
+        args.rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
+        args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
+        args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
+        args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT'])
+        os.environ['LOCAL_RANK'] = str(args.gpu)
+        os.environ['RANK'] = str(args.rank)
+        os.environ['WORLD_SIZE'] = str(args.world_size)
+        # ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"]
+
+    elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
+        args.rank = int(os.environ["RANK"])
+        args.world_size = int(os.environ['WORLD_SIZE'])
+        args.gpu = int(os.environ['LOCAL_RANK'])
+
+    elif 'SLURM_PROCID' in os.environ:
+        args.rank = int(os.environ['SLURM_PROCID'])
+        args.gpu = args.rank % torch.cuda.device_count()
+
+    else:
+        print('Not using DDP_distributed mode')
+        setup_for_distributed(is_master=True)  # hack
+        args.DDP_distributed = False
+        return
+
+    args.DDP_distributed = True
+
+    torch.cuda.set_device(args.gpu)
+    args.dist_backend = 'nccl'
+    print('| DDP_distributed init (rank {}): {}, gpu {}'.format(
+        args.rank, args.dist_url, args.gpu), flush=True)
+    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
+                                         world_size=args.world_size, rank=args.rank)
+    torch.distributed.barrier()
+    setup_for_distributed(args.rank == 0)
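+
+# Note: the branches above cover three common launch conventions:
+#   - OpenMPI-style launchers exporting the OMPI_COMM_WORLD_* variables,
+#   - torch.distributed launchers exporting RANK / WORLD_SIZE / LOCAL_RANK
+#     (e.g. `torchrun --nproc_per_node=4 <script>`, command shown for illustration),
+#   - SLURM (SLURM_PROCID), mapping the rank onto the local GPU count.
+# Anything else falls back to single-process mode with args.DDP_distributed = False.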
+
+
+class NativeScalerWithGradNormCount:
+    """
+    Loss scaler for automatic mixed precision training, with gradient clipping added on top.
+    """
+    state_dict_key = "amp_scaler"
+
+    def __init__(self, GPU_count=1, DDP_distributed=False):
+        self._scaler = torch.cuda.amp.GradScaler()
+        self.GPU_count = GPU_count
+        self.DDP_distributed = DDP_distributed
+
+    def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):
+
+        # backward
+        if self.DDP_distributed:
+            loss = loss.unsqueeze(-1)
+            self._scaler.scale(loss).backward(loss, create_graph=create_graph)  # create_graph
+        else:
+            if self.GPU_count == 1:  # only one GPU
+                # FIXME: unsqueeze restores a shape to the 0-dim single-GPU loss; the
+                # original design assumed multi-GPU losses, which carry a shape
+                loss = loss.unsqueeze(-1)
+                # FIXME: the ones_like gradient seed likely stems from the distributed
+                # (multi-world) design; kept for compatibility
+                self._scaler.scale(loss).backward(torch.ones_like(loss), create_graph=create_graph)  # create_graph
+
+        if update_grad:
+            # gradient clipping
+            if clip_grad is not None:
+                assert parameters is not None
+                self._scaler.unscale_(optimizer)  # unscale the gradients of optimizer's assigned params in-place
+                norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)
+            else:
+                self._scaler.unscale_(optimizer)
+                norm = get_grad_norm_(parameters)
+
+            self._scaler.step(optimizer)  # step the optimizer to update the model
+
+            self._scaler.update()
+        else:
+            norm = None
+
+        return norm
+
+    def state_dict(self):  # save the scaler's state_dict (loss-scale bookkeeping, not gradients)
+        return self._scaler.state_dict()
+
+    def load_state_dict(self, state_dict):  # restore the scaler state from a checkpoint
+        self._scaler.load_state_dict(state_dict)
+
+
+def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor:
+
+    if isinstance(parameters, torch.Tensor):
+        parameters = [parameters]
+
+    # keep only the parameters that received gradients
+    parameters = [p for p in parameters if p.grad is not None]
+    norm_type = float(norm_type)
+
+    if len(parameters) == 0:
+        return torch.tensor(0.)
+
+    # operate on the device that holds the gradients
+    device = parameters[0].grad.device
+
+    if norm_type == inf:
+        # for norm_type == inf, take the max absolute gradient value
+        total_norm = max(p.grad.detach().abs().max().to(device) for p in parameters)
+    else:
+        # otherwise compute the p-norm over the stacked per-parameter norms
+        total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), norm_type)
+
+    return total_norm
+
+
+def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, model_idx='SAE_'):
+    output_dir = Path(args.output_dir)
+    epoch_name = str(epoch)
+
+    if loss_scaler is not None:
+        checkpoint_paths = [output_dir / (model_idx + '_checkpoint-%s.pth' % epoch_name)]
+        for checkpoint_path in checkpoint_paths:
+            to_save = {
+                'model': model_without_ddp.state_dict(),
+                'optimizer': optimizer.state_dict(),
+                'epoch': epoch,
+                'scaler': loss_scaler.state_dict(),
+                'args': args,  # the config args are saved, but not restored on load
+            }
+
+            save_on_master(to_save, checkpoint_path)
+    else:
+        client_state = {'epoch': epoch}
+        model.save_checkpoint(save_dir=args.output_dir, tag="checkpoint-%s" % epoch_name, client_state=client_state)
+
+
+def load_model(args, model_without_ddp, optimizer, loss_scaler):
+
+    # load the checkpoint from args.resume; skip when no path is given
+    if args.resume:
+        if args.resume.startswith('https'):
+            checkpoint = torch.hub.load_state_dict_from_url(
+                args.resume, map_location='cpu', check_hash=True)
+        else:
+            checkpoint = torch.load(args.resume, map_location='cpu')
+
+        model_without_ddp.load_state_dict(checkpoint['model'])
+
+        print("Resume checkpoint %s" % args.resume)
+
+        if 'optimizer' in checkpoint and 'epoch' in checkpoint and not (hasattr(args, 'eval') and args.eval):
+            optimizer.load_state_dict(checkpoint['optimizer'])
+            args.start_epoch = checkpoint['epoch'] + 1
+
+            if 'scaler' in checkpoint:
+                loss_scaler.load_state_dict(checkpoint['scaler'])
+
+            print("With optim & sched!")
+
+
+# average a scalar (e.g. the loss) across all processes
+def all_reduce_mean(x):
+    world_size = get_world_size()
+
+    if world_size > 1:
+        x_reduce = torch.tensor(x).cuda()
+        dist.all_reduce(x_reduce)
+        x_reduce /= world_size
+        return x_reduce.item()
+
+    else:
+        return x
\ No newline at end of file
diff --git a/PuzzleTuning/SSL_structures/models_mae.py b/PuzzleTuning/SSL_structures/models_mae.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5ffb6a8c96b13bdab4de57ecdd9ba25e580e3f8
--- /dev/null
+++ b/PuzzleTuning/SSL_structures/models_mae.py
@@ -0,0 +1,665 @@
+"""
+MAE Model Script  ver: Oct 23rd 15:00
+
+# References:
+Based on MAE code.
+https://github.com/facebookresearch/mae + +timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm +DeiT: https://github.com/facebookresearch/deit + + +July 16th +Add patchify_decoder to form B,N,D +Add a parameter for MAE to import segmentation network +""" +from functools import partial + +import torch +import torch.nn as nn + +from timm.models.vision_transformer import PatchEmbed, Block +from Backbone.VPT_structure import VPT_ViT +from SSL_structures.pos_embed import get_2d_sincos_pos_embed + + +class MaskedAutoencoderViT(VPT_ViT): + """ + Masked Autoencoder with VisionTransformer backbone + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, + embed_dim=1024, depth=24, num_heads=16, + decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16, + mlp_ratio=4., norm_layer=nn.LayerNorm, norm_pix_loss=False, + prompt_mode=None, Prompt_Token_num=20, basic_state_dict=None, decoder=None, decoder_rep_dim=None): + + # model = MaskedAutoencoderViT( + # patch_size=16, embed_dim=768, depth=12, num_heads=12, + # decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16, + # mlp_ratio=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + + if prompt_mode is None: + super().__init__() + # MAE encoder specifics (this part just the same as ViT) + # -------------------------------------------------------------------------- + self.patch_embed = PatchEmbed(img_size, patch_size, in_chans, embed_dim) # BCHW -> BNC + num_patches = self.patch_embed.num_patches + + # learnable cls token is still used but on cls head need + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + # set and freeze encoder_pos_embed, use the fixed sin-cos embedding for tokens + mask_token + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim), requires_grad=False) + # Encoder blocks + self.blocks = nn.ModuleList([ # qk_scale=None fixme related to timm version + Block(embed_dim, num_heads, mlp_ratio, qkv_bias=True, norm_layer=norm_layer) + for i in range(depth)]) + self.norm = norm_layer(embed_dim) + + self.prompt_mode = prompt_mode + # -------------------------------------------------------------------------- + + else: + super().__init__(img_size=img_size, patch_size=patch_size, in_chans=in_chans, + embed_dim=embed_dim, depth=depth, num_heads=num_heads, mlp_ratio=mlp_ratio, + norm_layer=norm_layer, Prompt_Token_num=Prompt_Token_num, VPT_type=prompt_mode, + basic_state_dict=None) # Firstly, set then Encoder state_dict to none here. 
+ num_patches = self.patch_embed.num_patches # set patch_embed of VPT + # set and freeze encoder_pos_embed, use the fixed sin-cos embedding for tokens + mask_token + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim), requires_grad=False) + + self.prompt_mode = prompt_mode + # Freeze Encoder parameters except of the Prompt Tokens + self.Freeze() + + # MAE decoder specifics + # -------------------------------------------------------------------------- + # if the feature dimension of encoder and decoder are different, use decoder_embed to align them + if embed_dim != decoder_embed_dim: + self.decoder_embed = nn.Linear(embed_dim, decoder_embed_dim, bias=True) + else: + self.decoder_embed = nn.Identity() + + if decoder is not None: + self.decoder = decoder + # set mask_token (learnable mask token for reconstruction) + self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + # Decoder use a FC to reconstruct image, unlike the Encoder which use a CNN to split patch + self.decoder_pred = nn.Linear(decoder_rep_dim, patch_size ** 2 * in_chans, bias=True) # decoder to patch + + else: + self.decoder = None # 未传入decoder则与encoder流程一致,但是更改了通道数量,构建block(原版MAE) + # set mask_token (learnable mask token for reconstruction) + self.mask_token = nn.Parameter(torch.zeros(1, 1, decoder_embed_dim)) + + # set and freeze decoder_pos_embed, use the fixed sin-cos embedding for tokens + mask_token + self.decoder_pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, decoder_embed_dim), + requires_grad=False) + self.decoder_blocks = nn.ModuleList([Block(decoder_embed_dim, decoder_num_heads, mlp_ratio, + qkv_bias=True, norm_layer=norm_layer) + for i in range(decoder_depth)]) + # qk_scale=None fixme related to timm version + self.decoder_norm = norm_layer(decoder_embed_dim) + + # Decoder use a FC to reconstruct image, unlike the Encoder which use a CNN to split patch + self.decoder_pred = nn.Linear(decoder_embed_dim, patch_size ** 2 * in_chans, bias=True) # decoder to patch + + # -------------------------------------------------------------------------- + # wether or not to use norm_pix_loss + self.norm_pix_loss = norm_pix_loss + # parameter initialization + self.initialize_weights() + + # load basic state_dict of backbone for Transfer-learning-based tuning + if basic_state_dict is not None: + self.load_state_dict(basic_state_dict, False) + + def initialize_weights(self): + # initialization + # initialize a 2d positional encoding of (embed_dim, grid) by sin-cos embedding + pos_embed = get_2d_sincos_pos_embed(self.pos_embed.shape[-1], + int(self.patch_embed.num_patches ** .5), + cls_token=True) + # return: pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token) + self.pos_embed.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0)) + + if self.decoder is None: + # initialize a 2d positional encoding of (embed_dim, grid) by sin-cos embedding + decoder_pos_embed = get_2d_sincos_pos_embed(self.decoder_pos_embed.shape[-1], + int(self.patch_embed.num_patches ** .5), + cls_token=True) + self.decoder_pos_embed.data.copy_(torch.from_numpy(decoder_pos_embed).float().unsqueeze(0)) + + # initialize patch_embed like nn.Linear (instead of nn.Conv2d) + w = self.patch_embed.proj.weight.data + torch.nn.init.xavier_uniform_(w.view([w.shape[0], -1])) # xavier_uniform,让输入输出的方差相同,包括前后向传播 + + # timm's trunc_normal_(std=.02) is effectively normal_(std=0.02) as cutoff is too big (2.) 
+ torch.nn.init.normal_(self.cls_token, std=.02) + torch.nn.init.normal_(self.mask_token, std=.02) + + # initialize nn.Linear and nn.LayerNorm + self.apply(self._init_weights) + + def _init_weights(self, m): + # initialize nn.Linear and nn.LayerNorm + if isinstance(m, nn.Linear): + # we use xavier_uniform following official JAX ViT: + torch.nn.init.xavier_uniform_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def patchify(self, imgs): + """ + Encode image to patch tokens + + input: + imgs: (B, 3, H, W) + + output: + x: (B, num_patches, patch_size**2 *3) AKA [B, num_patches, flatten_dim] + """ + # patch_size + p = self.patch_embed.patch_size[0] + # assert H == W and image shape is dividedable by patch + assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % p == 0 + # patch num in rol or column + h = w = imgs.shape[2] // p + + # use reshape to split patch [B, C, H, W] -> [B, C, h_p, p, w_p, p] + x = imgs.reshape(shape=(imgs.shape[0], 3, h, p, w, p)) + # ReArrange dimensions [B, C, h_p, p, w_p, p] -> [B, h_p, w_p, p, p, C] + x = torch.einsum('nchpwq->nhwpqc', x) + # ReArrange dimensions [B, h_p, w_p, p, p, C] -> [B, num_patches, flatten_dim] + x = x.reshape(shape=(imgs.shape[0], h * w, p ** 2 * 3)) + return x + + def patchify_decoder(self, imgs, patch_size=None): # TODO 这里目的很大,需要实现预训练! + """ + Break image to patch tokens + + fixme,注意,这里patch_size应该是按照decoder的网络设置来作为default + + input: + imgs: (B, CLS, H, W) + + output: + x: (B, num_patches, -1) AKA [B, num_patches, -1] + """ + # patch_size + patch_size = self.patch_embed.patch_size[0] if patch_size is None else patch_size + + # assert H == W and image shape is divided-able by patch + assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % patch_size == 0 + # patch num in rol or column + h = w = imgs.shape[2] // patch_size + + # use reshape to split patch [B, C, H, W] -> [B, C, h_p, patch_size, w_p, patch_size] + x = imgs.reshape(shape=(imgs.shape[0], -1, h, patch_size, w, patch_size)) + + # ReArrange dimensions [B, C, h_p, patch_size, w_p, patch_size] -> [B, h_p, w_p, patch_size, patch_size, C] + x = torch.einsum('nchpwq->nhwpqc', x) + # ReArrange dimensions [B, h_p, w_p, patch_size, patch_size, C] -> [B, num_patches, flatten_dim] + x = x.reshape(shape=(imgs.shape[0], h * w, -1)) + return x + + def unpatchify(self, x, patch_size=None): + """ + Decoding encoded patch tokens + + input: + x: (B, num_patches, patch_size**2 *3) AKA [B, num_patches, flatten_dim] + + output: + imgs: (B, 3, H, W) + """ + # patch_size + p = self.patch_embed.patch_size[0] if patch_size is None else patch_size + + # squre root of num_patches(without CLS token required) + h = w = int(x.shape[1] ** .5) + # assert num_patches is without CLS token + assert h * w == x.shape[1] + + # ReArrange dimensions [B, num_patches, flatten_dim] -> [B, h_p, w_p, p, p, C] + x = x.reshape(shape=(x.shape[0], h, w, p, p, 3)) + # ReArrange dimensions [B, h_p, w_p, p, p, C] -> [B, C, h_p, p, w_p, p] + x = torch.einsum('nhwpqc->nchpwq', x) + # use reshape to compose patch [B, C, h_p, p, w_p, p] -> [B, C, H, W] + imgs = x.reshape(shape=(x.shape[0], 3, h * p, h * p)) + return imgs + + def random_masking(self, x, mask_ratio): + """ + Perform per-sample random masking by per-sample shuffling. + Per-sample shuffling is done by argsort random noise. 
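+
+        Worked example (illustrative numbers): with num_patches=196 and
+        mask_ratio=0.75, len_keep = int(196 * 0.25) = 49, so x_remained is
+        [B, 49, D] and mask holds 49 zeros (keep) and 147 ones (remove) per sample.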
+ + 注意torch.argsort返回的是: + 在每个指定dim,按原tensor每个位置数值大小升序排列后,的原本位置的idx组成的矩阵 + + input: + x: [B, num_patches, D], sequence of Tokens + + output: x_remained, mask, ids_restore + x_remained: [B, num_patches * (1-mask_ratio), D], sequence of Tokens + mask: [B, num_patches], binary mask + ids_restore: [B, num_patches], idx of restoring all position + """ + B, num_patches, D = x.shape # batch, length, dim + # 计算需要保留的位置的个数 + len_keep = int(num_patches * (1 - mask_ratio)) + # 做一个随机序列[B,num_patches],用于做位置标号 + noise = torch.rand(B, num_patches, device=x.device) # noise in [0, 1] + + # 在Batch里面每个序列上获得noise tensor经过升序排列后原本位置的idx矩阵 在batch内进行升序排列 + ids_shuffle = torch.argsort(noise, dim=1) # ascend: small is keep, large is remove + # 再对idx矩阵继续升序排列可获得:原始noise tensor的每个位置的排序顺位 + ids_restore = torch.argsort(ids_shuffle, dim=1) + + # keep the first subset + ids_keep = ids_shuffle[:, :len_keep] + + # 设置需要的patch的索引 + # ids_keep.unsqueeze(-1).repeat(1, 1, D): + # [B,num_patches] -> [B,keep_patches] -> [B,keep_patches,1] 每个位置数字为idx of ori patch -> [B,keep_patches,D] + + # torch.gather 按照索引取值构建新tensor: x_remained [B,keep_patches,D] 表示被标记需要保留的位置, 原文是x_masked + x_remained = torch.gather(x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, D)) + + # generate the binary mask: 0 is keep, 1 is remove + mask = torch.ones([B, num_patches], device=x.device) + mask[:, :len_keep] = 0 # 设置mask矩阵,前len_keep个为0,后面为1 + + # 按照noise tensor每个位置的大小顺序,来设置mask符号为0的位置,获得mask矩阵 + mask = torch.gather(mask, dim=1, index=ids_restore) + + return x_remained, mask, ids_restore # x_remained原文是x_masked + + def forward_encoder(self, imgs, mask_ratio): + """ + :param imgs: [B, C, H, W], sequence of imgs + :param mask_ratio: mask_ratio + + :return: Encoder output: encoded tokens, mask position, restore idxs + x: [B, 1 + num_patches * (1-mask_ratio), D], sequence of Tokens (including the cls token) + mask: [B, num_patches], binary mask + ids_restore: [B, num_patches], idx of restoring all position + """ + if self.prompt_mode is None: # ViT + # embed patches + x = self.patch_embed(imgs) # BCHW -> BNC + + # add pos embed w/o cls token + x = x + self.pos_embed[:, 1:, :] # add pos embed before concatenate the cls token + + # masking: length -> length * (1-mask_ratio) + # x_remained: [B, num_patches * (1-mask_ratio), D], sequence of Tokens + x, mask, ids_restore = self.random_masking(x, mask_ratio) + + # append cls token + cls_token = self.cls_token + self.pos_embed[:, :1, :] + cls_tokens = cls_token.expand(x.shape[0], -1, -1) # batch fix 调整batch + x = torch.cat((cls_tokens, x), dim=1) + + # apply Transformer Encoders + for blk in self.blocks: + x = blk(x) + + else: # VPT + x = self.patch_embed(imgs) + # add pos embed before concatenate the cls token + x = x + self.pos_embed[:, 1:, :] + # masking: length -> length * (1-mask_ratio) + # x_remained: [B, num_patches * (1-mask_ratio), D], sequence of Tokens + x, mask, ids_restore = self.random_masking(x, mask_ratio) + + # append cls token + cls_token = self.cls_token + self.pos_embed[:, :1, :] + cls_tokens = cls_token.expand(x.shape[0], -1, -1) # batch fix 调整batch + x = torch.cat((cls_tokens, x), dim=1) + + if self.VPT_type == "Deep": + Prompt_Token_num = self.Prompt_Tokens.shape[1] + for i in range(len(self.blocks)): + # concatenate Prompt_Tokens + Prompt_Tokens = self.Prompt_Tokens[i].unsqueeze(0) + # firstly concatenate + x = torch.cat((x, Prompt_Tokens.expand(x.shape[0], -1, -1)), dim=1) + num_tokens = x.shape[1] + # lastly remove, a good trick + x = self.blocks[i](x)[:, :num_tokens - Prompt_Token_num] + + 
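+            # The concatenate-then-truncate prompt trick above, with toy numbers:
+            # if x is [B, 50, D] (CLS + 49 kept patches) and Prompt_Token_num is 20,
+            # each block runs on [B, 70, D] and the slice [:, :50] drops the prompt
+            # outputs again, so only the image/CLS tokens flow into the next block.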
else: # self.VPT_type == "Shallow" + Prompt_Token_num = self.Prompt_Tokens.shape[1] + # concatenate Prompt_Tokens + Prompt_Tokens = self.Prompt_Tokens.expand(x.shape[0], -1, -1) + x = torch.cat((x, Prompt_Tokens), dim=1) + num_tokens = x.shape[1] + # A whole sequential process + x = self.blocks(x)[:, :num_tokens - Prompt_Token_num] + + # last norm of Transformer + x = self.norm(x) + + # Encoder output: encoded tokens, mask position, restore idxs + return x, mask, ids_restore + + def forward_decoder(self, x, ids_restore): + """ + :param x: [B, 1 + num_patches * (1-mask_ratio), D], sequence of Tokens (including the cls token) + :param ids_restore: restore idxs for torch.gather(mask, dim=1, index=ids_restore) + + :return: Decoder output: reconstracted tokens + x: [B, num_patches * (1-mask_ratio), D], sequence of Tokens + """ + if self.decoder is None: + # embed tokens: [B, num_encoded_tokens, embed_dim] -> [B, num_encoded_tokens, D_Decoder] + x = self.decoder_embed(x) # 更改适合的通道数 + + # append mask tokens to sequence as place holder: [B, num_patches + 1 - num_encoded_tokens, D_Decoder] + # number of mask token need is the requirement to fill the num_patches + mask_tokens = self.mask_token.repeat(x.shape[0], ids_restore.shape[1] + 1 - x.shape[1], 1) + # 这里ids_restore.shape[1] + 1 - x.shape[1] 其实意思是ids_restore.shape[1] - (x.shape[1]-1), 因为不要CLS token + + # -> [B, num_patches, D_Decoder] + x_ = torch.cat([x[:, 1:, :], mask_tokens], dim=1) # stripe the cls token in Decoder for restore position + + # unshuffle to restore the position of tokens + x_ = torch.gather(x_, dim=1, index=ids_restore.unsqueeze(-1).repeat(1, 1, x.shape[2])) + # torch.gather 按照索引取值构建新tensor: x_ [B,num_patches,D_Decoder] 表示位置还原之后的图,此时数值还不对 + + # append back the cls token at the first -> [B,1+num_patches,D_Decoder] + x = torch.cat([x[:, :1, :], x_], dim=1) + + # add pos embed + x = x + self.decoder_pos_embed + + # apply Transformer blocks + for blk in self.decoder_blocks: + x = blk(x) + x = self.decoder_norm(x) + + # Reconstruction projection [B, num_patches, D_Decoder] -> [B, num_patches, p*p*3] + x = self.decoder_pred(x) + + # remove cls token + x = x[:, 1:, :] + + else: + # append mask tokens to sequence as place holder: [B, num_patches + 1 - num_encoded_tokens, D] + # number of mask token need is the requirement to fill the num_patches + mask_tokens = self.mask_token.repeat(x.shape[0], ids_restore.shape[1] + 1 - x.shape[1], 1) + # 这里ids_restore.shape[1] + 1 - x.shape[1] 其实意思是ids_restore.shape[1] - (x.shape[1]-1), 因为不要CLS token + + # -> [B, num_patches, D] + x_ = torch.cat([x[:, 1:, :], mask_tokens], dim=1) # stripe the cls token in Decoder for restore position + + # unshuffle to restore the position of tokens + x_ = torch.gather(x_, dim=1, index=ids_restore.unsqueeze(-1).repeat(1, 1, x.shape[2])) + # torch.gather 按照索引取值构建新tensor: x_ [B,num_patches,D] 表示位置还原之后的图,此时数值还不对 + + # embed tokens: [B, num_encoded_tokens, D_Encoder] -> [B, num_encoded_tokens, D_Decoder] + x_ = self.decoder_embed(x_) + + # unpatchify to make image form [B, N, Enc] to [B,H,W,C] + x = self.unpatchify(x_) # restore image by Encoder + + # apply decoder module to segment the output of encoder + x = self.decoder(x) # [B, CLS, H, W] + # the output of segmentation is transformed to [B, N, Dec] + x = self.patchify_decoder(x) # TODO 做一个有意义的设计 + + # Convert the number of channels to match image for loss function + x = self.decoder_pred(x) # [B, N, Dec] -> [B, N, p*p*3] + + return x + + def forward_loss(self, imgs, pred, mask): # 通过把loss放到model里面,把model变成了一个训练框架 + 
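+        # With norm_pix_loss=True the target patches are standardized per patch,
+        # (target - mean) / sqrt(var + 1e-6) over each patch's p*p*3 values, and
+        # the MSE is averaged over the removed (mask == 1) patches only.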
""" + MSE loss for all patches towards the ori image + + Input: + imgs: [B, 3, H, W], Encoder input image + pred: [B, num_patches, p*p*3], Decoder reconstructed image + mask: [B, num_patches], 0 is keep, 1 is remove, + + """ + target = self.patchify(imgs) + + if self.norm_pix_loss: # 把target image patches 标准化 + mean = target.mean(dim=-1, keepdim=True) + var = target.var(dim=-1, keepdim=True) + target = (target - mean) / (var + 1.e-6) ** .5 + + # MSE loss + loss = (pred - target) ** 2 + loss = loss.mean(dim=-1) # [N, L], mean loss per patch + + # binary mask, 1 for removed patches + loss = (loss * mask).sum() / mask.sum() # mean loss on removed patches + return loss + + def forward(self, imgs, mask_ratio=0.75): + # Encoder to obtain latent tokens + latent, mask, ids_restore = self.forward_encoder(imgs, mask_ratio) + # Decoder to obtain Reconstructed image patches + pred = self.forward_decoder(latent, ids_restore) # [N, L, p*p*3] + # MSE loss for all patches towards the ori image + loss = self.forward_loss(imgs, pred, mask) + # print(loss) # todo 这里原文是为了关注loss爆炸, 可能有坑 + return loss, pred, mask + + +def mae_vit_base_patch16_dec512d8b(dec_idx=None, **kwargs): + print("Decoder:", dec_idx) + + model = MaskedAutoencoderViT( + patch_size=16, embed_dim=768, depth=12, num_heads=12, + decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16, + mlp_ratio=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + return model + + +def mae_vit_large_patch16_dec512d8b(dec_idx=None, **kwargs): + print("Decoder:", dec_idx) + + model = MaskedAutoencoderViT( + patch_size=16, embed_dim=1024, depth=24, num_heads=16, + decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16, + mlp_ratio=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + return model + + +def mae_vit_huge_patch14_dec512d8b(dec_idx=None, **kwargs): + print("Decoder:", dec_idx) + + model = MaskedAutoencoderViT( + patch_size=14, embed_dim=1280, depth=32, num_heads=16, + decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16, + mlp_ratio=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + return model + + +def mae_vit_base_patch16_decoder(dec_idx=None, num_classes=3, img_size=224, **kwargs): + # num_classes做的是one-hot seg但是不是做还原,我们得设计一下如何去做这个还原才能实现预训练 + + if dec_idx == 'swin_unet': + decoder_embed_dim = 768 + decoder_rep_dim = 16 * 16 * 3 + + from SSL_structures.Swin_Unet_main.networks.vision_transformer import SwinUnet as ViT_seg + decoder = ViT_seg(num_classes=num_classes, **kwargs) + + elif dec_idx == 'transunet': + decoder_embed_dim = 768 + decoder_rep_dim = 16 * 16 * 3 + + transunet_name = 'R50-ViT-B_16' + transunet_patches_size = 16 + from SSL_structures.TransUNet_main.networks.vit_seg_modeling import CONFIGS as CONFIGS_Transunet_seg + from SSL_structures.TransUNet_main.networks.vit_seg_modeling import VisionTransformer as Transunet_seg + + config_vit = CONFIGS_Transunet_seg[transunet_name] + config_vit.n_classes = num_classes + config_vit.n_skip = 3 + + if transunet_name.find('R50') != -1: + config_vit.patches.grid = ( + int(img_size / transunet_patches_size), int(img_size / transunet_patches_size)) + decoder = Transunet_seg(config_vit, num_classes=config_vit.n_classes) + + elif dec_idx == 'UTNetV2': + decoder_embed_dim = 768 + decoder_rep_dim = 16 * 16 * 3 + + from SSL_structures.UtnetV2.utnetv2 import UTNetV2 as UTNetV2_seg + decoder = UTNetV2_seg(in_chan=3, num_classes=num_classes) + + else: + print('no effective decoder!') + return -1 + + print('dec_idx: ', dec_idx) + + model = MaskedAutoencoderViT( + 
patch_size=16, embed_dim=768, depth=12, num_heads=12, + decoder_embed_dim=decoder_embed_dim, decoder_depth=8, decoder_num_heads=16, + mlp_ratio=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), decoder_rep_dim=decoder_rep_dim, decoder=decoder, + **kwargs) + return model + + +def mae_vit_large_patch16_decoder(dec_idx=None, num_classes=3, img_size=224, **kwargs): + # num_classes做的是one-hot seg但是不是做还原,我们得设计一下如何去做这个还原才能实现预训练 + + if dec_idx == 'swin_unet': + decoder_embed_dim = 768 + decoder_rep_dim = 16 * 16 * 3 + + from SSL_structures.Swin_Unet_main.networks.vision_transformer import SwinUnet as ViT_seg + decoder = ViT_seg(num_classes=num_classes, **kwargs) + + elif dec_idx == 'transunet': + decoder_embed_dim = 768 + decoder_rep_dim = 16 * 16 * 3 + + transunet_name = 'R50-ViT-B_16' + transunet_patches_size = 16 + from SSL_structures.TransUNet_main.networks.vit_seg_modeling import CONFIGS as CONFIGS_Transunet_seg + from SSL_structures.TransUNet_main.networks.vit_seg_modeling import VisionTransformer as Transunet_seg + + config_vit = CONFIGS_Transunet_seg[transunet_name] + config_vit.n_classes = num_classes + config_vit.n_skip = 3 + + if transunet_name.find('R50') != -1: + config_vit.patches.grid = ( + int(img_size / transunet_patches_size), int(img_size / transunet_patches_size)) + decoder = Transunet_seg(config_vit, num_classes=config_vit.n_classes) + + elif dec_idx == 'UTNetV2': + decoder_embed_dim = 768 + decoder_rep_dim = 16 * 16 * 3 + + from SSL_structures.UtnetV2.utnetv2 import UTNetV2 as UTNetV2_seg + decoder = UTNetV2_seg(in_chan=3, num_classes=num_classes) + + else: + print('no effective decoder!') + return -1 + + print('dec_idx: ', dec_idx) + + model = MaskedAutoencoderViT( + patch_size=16, embed_dim=1024, depth=24, num_heads=16, + decoder_embed_dim=decoder_embed_dim, decoder_depth=8, decoder_num_heads=16, + mlp_ratio=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), decoder_rep_dim=decoder_rep_dim, decoder=decoder, + **kwargs) + return model + + +def mae_vit_huge_patch14_decoder(dec_idx=None, num_classes=3, img_size=224, **kwargs): + # num_classes做的是one-hot seg但是不是做还原,我们得设计一下如何去做这个还原才能实现预训练 + + if dec_idx == 'swin_unet': + decoder_embed_dim = 588 # 1280 14*14*3 + decoder_rep_dim = 14 * 14 * 3 + + from SSL_structures.Swin_Unet_main.networks.vision_transformer import SwinUnet as ViT_seg + decoder = ViT_seg(num_classes=num_classes, **kwargs) + + elif dec_idx == 'transunet': + decoder_embed_dim = 768 + decoder_rep_dim = 16 * 16 * 3 + + transunet_name = 'R50-ViT-B_16' + transunet_patches_size = 16 + from SSL_structures.TransUNet_main.networks.vit_seg_modeling import CONFIGS as CONFIGS_Transunet_seg + from SSL_structures.TransUNet_main.networks.vit_seg_modeling import VisionTransformer as Transunet_seg + + config_vit = CONFIGS_Transunet_seg[transunet_name] + config_vit.n_classes = num_classes + config_vit.n_skip = 3 + + if transunet_name.find('R50') != -1: + config_vit.patches.grid = ( + int(img_size / transunet_patches_size), int(img_size / transunet_patches_size)) + decoder = Transunet_seg(config_vit, num_classes=config_vit.n_classes) + + elif dec_idx == 'UTNetV2': + decoder_embed_dim = 768 + decoder_rep_dim = 14 * 14 * 3 + + from SSL_structures.UtnetV2.utnetv2 import UTNetV2 as UTNetV2_seg + decoder = UTNetV2_seg(in_chan=3, num_classes=num_classes) + + else: + print('no effective decoder!') + return -1 + + print('dec_idx: ', dec_idx) + + model = MaskedAutoencoderViT( + patch_size=14, embed_dim=1280, depth=32, num_heads=16, + decoder_embed_dim=decoder_embed_dim, decoder_depth=8, 
decoder_num_heads=16, + mlp_ratio=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), decoder_rep_dim=decoder_rep_dim, decoder=decoder, + **kwargs) + return model + + +# set recommended archs +mae_vit_base_patch16 = mae_vit_base_patch16_dec512d8b # decoder: 512 dim, 8 blocks +mae_vit_large_patch16 = mae_vit_large_patch16_dec512d8b # decoder: 512 dim, 8 blocks +mae_vit_huge_patch14 = mae_vit_huge_patch14_dec512d8b # decoder: 512 dim, 8 blocks + +# Equiped with decoders +mae_vit_base_patch16_decoder = mae_vit_base_patch16_decoder # decoder: 768 dim, HYF +mae_vit_large_patch16_decoder = mae_vit_large_patch16_decoder # decoder: 768 dim, HYF +mae_vit_huge_patch14_decoder = mae_vit_huge_patch14_decoder # decoder: 768 dim, HYF + + +if __name__ == '__main__': + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + img_size = 224 + num_classes = 3 + x = torch.rand(8, 3, img_size, img_size, device=device) + + # model = mae_vit_base_patch16(img_size=224, decoder=None) # decoder_embed_dim=512 + model = mae_vit_base_patch16_decoder(prompt_mode='Deep', Prompt_Token_num=20, basic_state_dict=None, + dec_idx='UTNetV2', img_size=img_size) + + model.to(device) + + loss, pred, mask_patch_indicators = model(x) + + print(loss, '\n') + + print(loss.shape, '\n') + + print(pred.shape, '\n') + + print(mask_patch_indicators.shape, '\n') diff --git a/PuzzleTuning/SSL_structures/pos_embed.py b/PuzzleTuning/SSL_structures/pos_embed.py new file mode 100644 index 0000000000000000000000000000000000000000..0f08cd74cdbdd5ff13b251030b81c8160fd038d7 --- /dev/null +++ b/PuzzleTuning/SSL_structures/pos_embed.py @@ -0,0 +1,100 @@ +""" +Position embedding utils Script ver: Sep 18th 16:30 + +Based on MAE code. +https://github.com/facebookresearch/mae + +""" + +import numpy as np + +import torch + + +# -------------------------------------------------------- +# 2D sine-cosine position embedding +# References: +# Transformer: https://github.com/tensorflow/models/blob/master/official/nlp/transformer/model_utils.py +# MoCo v3: https://github.com/facebookresearch/moco-v3 +# -------------------------------------------------------- +def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False): + """ + grid_size: int of the grid height and width, AKA the num of patch on each direction. 
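+               (e.g. a 224x224 input with 16x16 patches gives grid_size = 14, so
+               pos_embed has 14 * 14 = 196 rows, or 197 with the cls_token row)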
+ + return: + pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token) + """ + grid_h = np.arange(grid_size, dtype=np.float32) + grid_w = np.arange(grid_size, dtype=np.float32) + + grid = np.meshgrid(grid_w, grid_h) # here w goes first + grid = np.stack(grid, axis=0) # (2, grid_w, grid_h) + grid = grid.reshape([2, 1, grid_size, grid_size]) # (2, 1, grid_w, grid_h) + + # get a 2d positional encoding of (embed_dim, grid) + pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid) + + if cls_token: # if the CLS token is here, give it a zero encoding + pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0) + return pos_embed + + +def get_2d_sincos_pos_embed_from_grid(embed_dim, grid): + assert embed_dim % 2 == 0 + + # use half of dimensions to encode grid_h + emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2) + emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2) + + emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D) + return emb + + +def get_1d_sincos_pos_embed_from_grid(embed_dim, pos): + """ + embed_dim: output dimension for each position + pos: a list of positions to be encoded: size (M,) + out: (M, D) + """ + assert embed_dim % 2 == 0 + omega = np.arange(embed_dim // 2, dtype=np.float32) # fixme earlier: dtype=np.float + omega /= embed_dim / 2. + omega = 1. / 10000 ** omega # (D/2,) + + pos = pos.reshape(-1) # (M,) + out = np.einsum('m,d->md', pos, omega) # (M, D/2), outer product + + emb_sin = np.sin(out) # (M, D/2) + emb_cos = np.cos(out) # (M, D/2) + + emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D) + return emb + + +# -------------------------------------------------------- +# Interpolate position embeddings for high-resolution +# References: +# DeiT: https://github.com/facebookresearch/deit +# -------------------------------------------------------- +def interpolate_pos_embed(model, checkpoint_model): + if 'pos_embed' in checkpoint_model: + pos_embed_checkpoint = checkpoint_model['pos_embed'] + embedding_size = pos_embed_checkpoint.shape[-1] + num_patches = model.patch_embed.num_patches + num_extra_tokens = model.pos_embed.shape[-2] - num_patches + # height (== width) for the checkpoint position embedding + orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5) + # height (== width) for the new position embedding + new_size = int(num_patches ** 0.5) + # class_token and dist_token are kept unchanged + if orig_size != new_size: + print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size)) + extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] + # only the position tokens are interpolated + pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] + pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2) + pos_tokens = torch.nn.functional.interpolate( + pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False) + pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) + new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) + checkpoint_model['pos_embed'] = new_pos_embed \ No newline at end of file diff --git a/PuzzleTuning/SSL_structures/temp-tensors/color.pt b/PuzzleTuning/SSL_structures/temp-tensors/color.pt new file mode 100644 index 0000000000000000000000000000000000000000..058c6edd177162c02e2cf7e802a0c9191b5897b4 --- /dev/null +++ b/PuzzleTuning/SSL_structures/temp-tensors/color.pt @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:63b58afc3d30f2cead67e4e9d77d0509a9cc547cc0a26390e07204e8f8f9ff0c +size 2409195 diff --git a/PuzzleTuning/SSL_structures/temp-tensors/color_labels.pt b/PuzzleTuning/SSL_structures/temp-tensors/color_labels.pt new file mode 100644 index 0000000000000000000000000000000000000000..135a55b90ab671cb650ff50862582cb120d6e69f --- /dev/null +++ b/PuzzleTuning/SSL_structures/temp-tensors/color_labels.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25f4d6316bbcab066eaf26be4e418547a468d3f5d45c274535306bd37ca814b6 +size 747 diff --git a/PuzzleTuning/SSL_structures/temp-tensors/warwick.pt b/PuzzleTuning/SSL_structures/temp-tensors/warwick.pt new file mode 100644 index 0000000000000000000000000000000000000000..e37fa21a753b5c76591757775ad2e267651af424 --- /dev/null +++ b/PuzzleTuning/SSL_structures/temp-tensors/warwick.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ec6955b6afe4692ee4ead145f50e052e5c4481409beeda42bc4377d6fe8f579 +size 2409195 diff --git a/PuzzleTuning/SSL_structures/temp-tensors/warwick_labels.pt b/PuzzleTuning/SSL_structures/temp-tensors/warwick_labels.pt new file mode 100644 index 0000000000000000000000000000000000000000..edca0ee35b3cf03395e6319763430844f128bbb0 --- /dev/null +++ b/PuzzleTuning/SSL_structures/temp-tensors/warwick_labels.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65c8a01f50cf37cccc74514ada3f2e4d696133652404b2bb4313cfd0fe6df063 +size 747 diff --git a/PuzzleTuning/Test.py b/PuzzleTuning/Test.py new file mode 100644 index 0000000000000000000000000000000000000000..a854908244a66a585ad3e10301b18b58fe07c76b --- /dev/null +++ b/PuzzleTuning/Test.py @@ -0,0 +1,478 @@ +""" +Testing Script ver: Oct 23rd 17:30 +""" + +from __future__ import print_function, division + +import argparse +import json +import time + +import torchvision +from tensorboardX import SummaryWriter + +from Backbone.getmodel import get_model +from Backbone.GetPromptModel import build_promptmodel + +from utils.data_augmentation import * +from utils.visual_usage import * + + +def test_model(model, test_dataloader, criterion, class_names, test_dataset_size, model_idx, test_model_idx, edge_size, + check_minibatch=100, device=None, draw_path='../imaging_results', enable_attention_check=None, + enable_visualize_check=True, writer=None): + """ + Testing iteration + + :param model: model object + :param test_dataloader: the test_dataloader obj + :param criterion: loss func obj + :param class_names: The name of classes for priting + :param test_dataset_size: size of datasets + + :param model_idx: model idx for the getting trained model + :param edge_size: image size for the input image + :param check_minibatch: number of skip over minibatch in calculating the criteria's results etc. + + :param device: cpu/gpu object + :param draw_path: path folder for output pic + :param enable_attention_check: use attention_check to show the pics of models' attention areas + :param enable_visualize_check: use visualize_check to show the pics + + :param writer: attach the records to the tensorboard backend + """ + + # scheduler is an LR scheduler object from torch.optim.lr_scheduler. 
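+    # Bookkeeping sketch: json_log['test'][phase] ends up holding one
+    # confusion-matrix dict per class, e.g.
+    #   {'class_A': {'tp': ..., 'tn': ..., 'fp': ..., 'fn': ...}, ...}
+    # (class names are illustrative); the counts are accumulated per minibatch below.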
+    if device is None:
+        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+
+    since = time.time()
+
+    print('Epoch: Test')
+    print('-' * 10)
+
+    phase = 'test'
+    index = 0
+    model_time = time.time()
+
+    # initiate the empty json dict
+    json_log = {'test': {}}
+
+    # initiate the empty log dict
+    log_dict = {}
+    for cls_idx in range(len(class_names)):
+        log_dict[class_names[cls_idx]] = {'tp': 0.0, 'tn': 0.0, 'fp': 0.0, 'fn': 0.0}
+
+    model.eval()  # Set model to evaluate mode
+
+    # criteria, initially empty
+    running_loss = 0.0
+    log_running_loss = 0.0
+    running_corrects = 0
+
+    # Iterate over data.
+    for inputs, labels in test_dataloader:  # a different dataloader is used in each phase
+        inputs = inputs.to(device)
+        # print('inputs[0]', type(inputs[0]))
+
+        labels = labels.to(device)
+
+        # zeroing the parameter gradients is only needed in training
+        # optimizer.zero_grad()
+
+        # forward
+        outputs = model(inputs)
+        _, preds = torch.max(outputs, 1)
+        loss = criterion(outputs, labels)
+
+        # log criteria: update
+        log_running_loss += loss.item()
+        running_loss += loss.item() * inputs.size(0)
+        running_corrects += torch.sum(preds == labels.data)
+
+        # Compute the precision and recall counts for each class.
+        for cls_idx in range(len(class_names)):
+            # NOTICE remember to put the tensors back on the cpu
+            tp = np.dot((labels.cpu().data == cls_idx).numpy().astype(int),
+                        (preds == cls_idx).cpu().numpy().astype(int))
+            tn = np.dot((labels.cpu().data != cls_idx).numpy().astype(int),
+                        (preds != cls_idx).cpu().numpy().astype(int))
+
+            fp = np.sum((preds == cls_idx).cpu().numpy()) - tp
+
+            fn = np.sum((labels.cpu().data == cls_idx).numpy()) - tp
+
+            # log_dict[cls_idx] = {'tp': 0, 'tn': 0, 'fp': 0, 'fn': 0}
+            log_dict[class_names[cls_idx]]['tp'] += tp
+            log_dict[class_names[cls_idx]]['tn'] += tn
+            log_dict[class_names[cls_idx]]['fp'] += fp
+            log_dict[class_names[cls_idx]]['fn'] += fn
+
+        # attach the records to the tensorboard backend
+        if writer is not None:
+            # ...log the running loss
+            writer.add_scalar(phase + ' minibatch loss',
+                              float(loss.item()),
+                              index)
+            writer.add_scalar(phase + ' minibatch ACC',
+                              float(torch.sum(preds == labels.data) / inputs.size(0)),
+                              index)
+
+        # a check point is reached
+        if index % check_minibatch == check_minibatch - 1:
+            model_time = time.time() - model_time
+
+            check_index = index // check_minibatch + 1
+
+            epoch_idx = 'test'
+            print('Epoch:', epoch_idx, '   ', phase, 'index of ' + str(check_minibatch) + ' minibatch:',
+                  check_index, '     time used:', model_time)
+
+            print('minibatch AVG loss:', float(log_running_loss) / check_minibatch)
+
+            # the number of images to check should be SMALLER THAN the batch size
+
+            if enable_attention_check:
+                try:
+                    check_SAA(inputs, labels, model, model_idx, edge_size, class_names, num_images=1,
+                              pic_name='GradCAM_' + str(epoch_idx) + '_I_' + str(index + 1),
+                              draw_path=draw_path, writer=writer)
+                except:
+                    print('model:', model_idx, ' with edge_size', edge_size, 'is not supported yet')
+            else:
+                pass
+
+            if enable_visualize_check:
+                visualize_check(inputs, labels, model, class_names, num_images=-1,
+                                pic_name='Visual_' + str(epoch_idx) + '_I_' + str(index + 1),
+                                draw_path=draw_path, writer=writer)
+
+            model_time = time.time()
+            log_running_loss = 0.0
+
+        index += 1
+    # json log: update
+    json_log['test'][phase] = log_dict
+
+    # log criteria: print
+    epoch_loss = running_loss / test_dataset_size
+    epoch_acc = running_corrects.double() / test_dataset_size * 100
+    print('\nEpoch:  {} \nLoss: {:.4f}  Acc: {:.4f}'.format(phase, epoch_loss,
epoch_acc)) + + for cls_idx in range(len(class_names)): + # calculating the confusion matrix + tp = log_dict[class_names[cls_idx]]['tp'] + tn = log_dict[class_names[cls_idx]]['tn'] + fp = log_dict[class_names[cls_idx]]['fp'] + fn = log_dict[class_names[cls_idx]]['fn'] + tp_plus_fp = tp + fp + tp_plus_fn = tp + fn + fp_plus_tn = fp + tn + fn_plus_tn = fn + tn + + # precision + if tp_plus_fp == 0: + precision = 0 + else: + precision = float(tp) / tp_plus_fp * 100 + # recall + if tp_plus_fn == 0: + recall = 0 + else: + recall = float(tp) / tp_plus_fn * 100 + + # TPR (sensitivity) + TPR = recall + + # TNR (specificity) + # FPR + if fp_plus_tn == 0: + TNR = 0 + FPR = 0 + else: + TNR = tn / fp_plus_tn * 100 + FPR = fp / fp_plus_tn * 100 + + # NPV + if fn_plus_tn == 0: + NPV = 0 + else: + NPV = tn / fn_plus_tn * 100 + + print('{} precision: {:.4f} recall: {:.4f}'.format(class_names[cls_idx], precision, recall)) + print('{} sensitivity: {:.4f} specificity: {:.4f}'.format(class_names[cls_idx], TPR, TNR)) + print('{} FPR: {:.4f} NPV: {:.4f}'.format(class_names[cls_idx], FPR, NPV)) + print('{} TP: {}'.format(class_names[cls_idx], tp)) + print('{} TN: {}'.format(class_names[cls_idx], tn)) + print('{} FP: {}'.format(class_names[cls_idx], fp)) + print('{} FN: {}'.format(class_names[cls_idx], fn)) + + print('\n') + + time_elapsed = time.time() - since + print('Testing complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60)) + + # attach the records to the tensorboard backend + if writer is not None: + writer.close() + + # save json_log indent=2 for better view + json.dump(json_log, open(os.path.join(draw_path, test_model_idx + '_log.json'), 'w'), ensure_ascii=False, indent=2) + + return model + + +def main(args): + if args.paint: + # use Agg kernal, not painting in the front-desk + import matplotlib + matplotlib.use('Agg') + + gpu_idx = args.gpu_idx # GPU idx start with0, -1 to use multiple GPU + + enable_tensorboard = args.enable_tensorboard # False + + enable_attention_check = args.enable_attention_check # False + enable_visualize_check = args.enable_visualize_check # False + + data_augmentation_mode = args.data_augmentation_mode # 0 + + # Prompt + PromptTuning = args.PromptTuning # None "Deep" / "Shallow" + Prompt_Token_num = args.Prompt_Token_num # 20 + PromptUnFreeze = args.PromptUnFreeze # False + + model_idx = args.model_idx # the model we are going to use. 
+    # (model names follow the format Model_size_other_info)
+
+    # structural parameters
+    drop_rate = args.drop_rate
+    attn_drop_rate = args.attn_drop_rate
+    drop_path_rate = args.drop_path_rate
+    use_cls_token = False if args.cls_token_off else True
+    use_pos_embedding = False if args.pos_embedding_off else True
+    use_att_module = None if args.att_module == 'None' else args.att_module
+
+    # PATH info
+    draw_root = args.draw_root
+    model_path = args.model_path
+    dataroot = args.dataroot
+    model_path_by_hand = args.model_path_by_hand  # None
+    # pre-trained base model, needed when testing a prompt-tuned model
+    Pre_Trained_model_path = args.Pre_Trained_model_path  # None
+
+    # CLS_ is for the CLS-trained models; MIL_Stripe means MIL-trained and tested with Stripe
+    test_model_idx = 'CLS_' + model_idx + '_test'
+    # NOTICE: MIL models should only be tested in stripe mode in this Test.py
+
+    draw_path = os.path.join(draw_root, test_model_idx)
+
+    # load the fine-tuned model by its task-based saving name;
+    # MIL-SI models are also supported, but the MIL_Stripe variant is required
+    if model_path_by_hand is None:
+        # CLS_ is for CLS training; MIL means MIL training
+        save_model_path = os.path.join(model_path, 'CLS_' + model_idx + '.pth')
+    else:
+        save_model_path = model_path_by_hand
+
+    if not os.path.exists(draw_path):
+        os.makedirs(draw_path)
+
+    # choose the test dataset
+    test_dataroot = os.path.join(dataroot, 'test')
+
+    # dataset info
+    num_classes = args.num_classes  # default 0 for auto-fit
+    edge_size = args.edge_size
+
+    # validating setting
+    batch_size = args.batch_size
+    criterion = nn.CrossEntropyLoss()
+
+    # Data augmentation is not used in validating or testing
+    data_transforms = data_augmentation(data_augmentation_mode, edge_size=edge_size)
+
+    # the test setting is the same as the validation dataset's setting
+    test_datasets = torchvision.datasets.ImageFolder(test_dataroot, data_transforms['val'])
+    test_dataset_size = len(test_datasets)
+    # if check_minibatch is not given, skip over minibatches so that about 20 figures are drawn
+    check_minibatch = args.check_minibatch if args.check_minibatch is not None else test_dataset_size // (
+            20 * batch_size)
+
+    test_dataloader = torch.utils.data.DataLoader(test_datasets, batch_size=batch_size, shuffle=False, num_workers=1)
+
+    class_names = [d.name for d in os.scandir(test_dataroot) if d.is_dir()]
+    class_names.sort()
+
+    if num_classes == 0:
+        print("class_names:", class_names)
+        num_classes = len(class_names)
+    else:
+        if len(class_names) == num_classes:
+            print("class_names:", class_names)
+        else:
+            print('the classification number of the model mismatches the dataset requirement of:', len(class_names))
+            return -1
+
+    # get model
+    pretrained_backbone = False  # the model is already trained, so a pretrained backbone weight is useless here
+
+    if PromptTuning is None:
+        model = get_model(num_classes, edge_size, model_idx, drop_rate, attn_drop_rate, drop_path_rate,
+                          pretrained_backbone, use_cls_token, use_pos_embedding, use_att_module)
+    else:
+        if Pre_Trained_model_path is not None and os.path.exists(Pre_Trained_model_path):
+            base_state_dict = torch.load(Pre_Trained_model_path)
+        else:
+            base_state_dict = 'timm'
+            print('base_state_dict of timm')
+
+        print('Test the PromptTuning of', model_idx)
+        print('Prompt VPT type:', PromptTuning)
+        model = build_promptmodel(num_classes, edge_size, model_idx, Prompt_Token_num=Prompt_Token_num,
+                                  VPT_type=PromptTuning, base_state_dict=base_state_dict)
+
+    try:
+        if PromptTuning is None:
+            model.load_state_dict(torch.load(save_model_path))
+        else:
+            if PromptUnFreeze:
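+                # PromptUnFreeze means the whole backbone was tuned as well, so a
+                # full state_dict is restored; otherwise only the learned prompt
+                # parameters are loaded via load_prompt.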
model.load_state_dict(torch.load(save_model_path))
+            else:
+                model.load_prompt(torch.load(save_model_path))
+
+        print("model loaded")
+        print("model :", model_idx)
+
+    except:
+        try:
+            model = nn.DataParallel(model)
+
+            if PromptTuning is None:
+                model.load_state_dict(torch.load(save_model_path))
+            else:
+                if PromptUnFreeze:
+                    model.load_state_dict(torch.load(save_model_path))
+                else:
+                    model.load_prompt(torch.load(save_model_path))
+
+            print("DataParallel model loaded")
+        except:
+            print("model loading error!")
+            return -1
+
+    if gpu_idx == -1:
+        if torch.cuda.device_count() > 1:
+            print("Use", torch.cuda.device_count(), "GPUs!")
+            # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
+            model = nn.DataParallel(model)
+        else:
+            print("we don't have multiple GPUs here, trying gpu_idx=0")
+            try:
+                # setting 0 for: only card idx 0 is visible to this code
+                os.environ['CUDA_VISIBLE_DEVICES'] = '0'
+            except:
+                print("GPU assignment error occurred, using CPU instead")
+
+    else:
+        # Decide which device we want to run on
+        try:
+            # setting k for: only card idx k is visible to this code
+            os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_idx)
+        except:
+            print("we don't have that GPU idx here, trying gpu_idx=0")
+            try:
+                # setting 0 for: only card idx 0 is visible to this code
+                os.environ['CUDA_VISIBLE_DEVICES'] = '0'
+            except:
+                print("GPU assignment error occurred, using CPU instead")
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # single card for test
+
+    model.to(device)
+
+    # start tensorboard backend
+    if enable_tensorboard:
+        writer = SummaryWriter(draw_path)
+    else:
+        writer = None
+
+    # if you want to run tensorboard locally
+    # nohup tensorboard --logdir=/home/experiments/runs --host=0.0.0.0 --port=7777 &
+
+    print("*********************************{}*************************************".format('setting'))
+    print(args)
+
+    test_model(model, test_dataloader, criterion, class_names, test_dataset_size, model_idx=model_idx,
+               test_model_idx=test_model_idx, edge_size=edge_size, check_minibatch=check_minibatch,
+               device=device, draw_path=draw_path, enable_attention_check=enable_attention_check,
+               enable_visualize_check=enable_visualize_check, writer=writer)
+
+
+def get_args_parser():
+    parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
+
+    # Model Name or index
+    parser.add_argument('--model_idx', default='ViT_base', type=str, help='Model Name or index')
+
+    # drop_rate, attn_drop_rate, drop_path_rate
+    parser.add_argument('--drop_rate', default=0.0, type=float, help='dropout rate, default 0.0')
+    parser.add_argument('--attn_drop_rate', default=0.0, type=float, help='dropout rate after attention, default 0.0')
+    parser.add_argument('--drop_path_rate', default=0.0, type=float, help='drop path for stochastic depth, default 0.0')
+
+    # Ablation Studies for MSHT
+    parser.add_argument('--cls_token_off', action='store_true', help='disable cls_token in the model structure')
+    parser.add_argument('--pos_embedding_off', action='store_true', help='disable pos_embedding in the model structure')
+    # 'SimAM', 'CBAM', 'SE' 'None'
+    parser.add_argument('--att_module', default='SimAM', type=str, help='use which att_module in model structure')
+
+    # Environment parameters
+    parser.add_argument('--gpu_idx', default=0, type=int,
+                        help='use a single GPU with its index, -1 to use multiple GPU')
+
+    # Path parameters
+    parser.add_argument('--dataroot', default=r'/data/k5_dataset',
+                        help='path to dataset')
+    parser.add_argument('--model_path',
default=r'/home/saved_models', + help='root path to save model state-dict, model will be find by name') + parser.add_argument('--draw_root', default=r'/home/runs', + help='path to draw and save tensorboard output') + # model_path_by_hand + parser.add_argument('--model_path_by_hand', default=None, type=str, help='specified path to a model state-dict') + + # Help tool parameters + parser.add_argument('--paint', action='store_false', help='paint in front desk') # matplotlib.use('Agg') + + # check tool parameters + parser.add_argument('--enable_tensorboard', action='store_true', help='enable tensorboard to save status') + + parser.add_argument('--enable_attention_check', action='store_true', help='check and save attention map') + parser.add_argument('--enable_visualize_check', action='store_true', help='check and save pics') + + parser.add_argument('--data_augmentation_mode', default=0, type=int, help='data_augmentation_mode') + + # PromptTuning + parser.add_argument('--PromptTuning', default=None, type=str, + help='use Prompt Tuning strategy instead of Finetuning') + # Prompt_Token_num + parser.add_argument('--Prompt_Token_num', default=20, type=int, help='Prompt_Token_num') + # PromptUnFreeze + parser.add_argument('--PromptUnFreeze', action='store_true', help='prompt tuning with all parameaters un-freezed') + # prompt model basic model path + parser.add_argument('--Pre_Trained_model_path', default=None, type=str, + help='Finetuning a trained model in this dataset') + + # Dataset based parameters + parser.add_argument('--num_classes', default=0, type=int, help='classification number, default 0 for auto-fit') + parser.add_argument('--edge_size', default=384, type=int, help='edge size of input image') # 224 256 384 1000 + + # Test setting parameters + parser.add_argument('--batch_size', default=1, type=int, help='testing batch_size default 1') + # check_minibatch for painting pics + parser.add_argument('--check_minibatch', default=None, type=int, help='check batch_size') + + return parser + + +if __name__ == '__main__': + parser = get_args_parser() + args = parser.parse_args() + main(args) diff --git a/PuzzleTuning/Train.py b/PuzzleTuning/Train.py new file mode 100644 index 0000000000000000000000000000000000000000..b46efb02266af69f1f8875d390a6de116a95601e --- /dev/null +++ b/PuzzleTuning/Train.py @@ -0,0 +1,858 @@ +""" +Training Script ver: Oct 23rd 17:30 +dataset structure: ImageNet +image folder dataset is used. 
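+
+An illustrative layout (the class folder names here are examples only):
+    dataroot/
+        train/
+            class_0/xxx.jpg ...
+            class_1/yyy.jpg ...
+        val/
+            class_0/zzz.jpg ...
+            class_1/www.jpg ...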
+""" + +from __future__ import print_function, division + +import argparse +import copy +import json +import time +import os +import numpy as np + +import torch +import torch.nn as nn +import torch.optim as optim +import torchvision +from tensorboardX import SummaryWriter +from torch.optim import lr_scheduler +from torchsummary import summary + +from utils.data_augmentation import data_augmentation +from utils.SoftCrossEntropyLoss import SoftlabelCrossEntropy +from utils.online_augmentations import get_online_augmentation +from utils.visual_usage import visualize_check, check_SAA +from utils.tools import setup_seed, del_file, FixStateDict +from utils.schedulers import patch_scheduler, ratio_scheduler + +from Backbone.getmodel import get_model +from Backbone.GetPromptModel import build_promptmodel + + +# Training Strategy +def better_performance(temp_acc, temp_vac, best_acc, best_vac): # determin which epoch have the best model + + if temp_vac >= best_vac and temp_acc >= best_acc: + return True + elif temp_vac > best_vac: + return True + else: + return False + + +def train_model(model, dataloaders, criterion, optimizer, class_names, dataset_sizes, Augmentation=None, + fix_position_ratio_scheduler=None, puzzle_patch_size_scheduler=None, edge_size=384, + model_idx=None, num_epochs=25, intake_epochs=0, check_minibatch=100, scheduler=None, device=None, + draw_path='../imagingresults', enable_attention_check=False, enable_visualize_check=False, + enable_sam=False, writer=None): + """ + Training iteration + :param model: model object + :param dataloaders: 2 dataloader(train and val) dict + :param criterion: loss func obj + :param optimizer: optimizer obj + :param class_names: The name of classes for priting + :param dataset_sizes: size of datasets + :param Augmentation: Online augmentation methods + :param fix_position_ratio_scheduler: Online augmentation fix_position_ratio_scheduler + :param puzzle_patch_size_scheduler: Online augmentation puzzle_patch_size_scheduler + :param edge_size: image size for the input image + :param model_idx: model idx for the getting pre-setted model + :param num_epochs: total training epochs + :param intake_epochs: number of skip over epochs when choosing the best model + :param check_minibatch: number of skip over minibatch in calculating the criteria's results etc. + :param scheduler: scheduler is an LR scheduler object from torch.optim.lr_scheduler. 
+ :param device: cpu/gpu object + :param draw_path: path folder for output pic + :param enable_attention_check: use attention_check to show the pics of models' attention areas + :param enable_visualize_check: use visualize_check to show the pics + :param enable_sam: use SAM training strategy + :param writer: attach the records to the tensorboard backend + """ + + if device is None: + device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + + since = time.time() + + # for saving the best model state dict + best_model_wts = copy.deepcopy(model.state_dict()) # deepcopy + # initial an empty dict + json_log = {} + + # initial best performance + best_acc = 0.0 + best_vac = 0.0 + temp_acc = 0.0 + temp_vac = 0.0 + best_epoch_idx = 1 + + epoch_loss = 0.0 # initial value for loss-drive + + for epoch in range(num_epochs): + print('Epoch {}/{}'.format(epoch + 1, num_epochs)) + print('-' * 10) + + # record json log, initially empty + json_log[str(epoch + 1)] = {} + + # Each epoch has a training and validation phase + for phase in ['train', 'val']: # alternatively train/val + + index = 0 + check_index = -1 # set a visulize check at the end of each epoch's train and val + model_time = time.time() + + # initiate the empty log dict + log_dict = {} + for cls_idx in range(len(class_names)): + # only float type is allowed in json, set to float inside + log_dict[class_names[cls_idx]] = {'tp': 0.0, 'tn': 0.0, 'fp': 0.0, 'fn': 0.0} + + if phase == 'train': + model.train() # Set model to training mode + else: + model.eval() # Set model to evaluate mode + + # criterias, initially empty + running_loss = 0.0 + log_running_loss = 0.0 + running_corrects = 0 + + # Iterate over data. + for inputs, labels in dataloaders[phase]: # use different dataloder in different phase + + inputs = inputs.to(device) # print('inputs[0]',type(inputs[0])) + # NOTICE in CLS task the labels' type is long tensor([B]),not one-hot ([B,CLS]) + labels = labels.to(device) + + # Online Augmentations on device + if Augmentation is not None: + if phase == 'train': + # cellmix + if fix_position_ratio_scheduler is not None and puzzle_patch_size_scheduler is not None: + # loss-drive + fix_position_ratio = fix_position_ratio_scheduler(epoch, epoch_loss) + puzzle_patch_size = puzzle_patch_size_scheduler(epoch, epoch_loss) + + inputs, labels, GT_long_labels = Augmentation(inputs, labels, + fix_position_ratio, puzzle_patch_size) + # Counterpart augmentations + else: + inputs, labels, GT_long_labels = Augmentation(inputs, labels) + + else: # Val + inputs, labels, GT_long_labels = Augmentation(inputs, labels, act=False) + else: + GT_long_labels = labels # store ori_label on CPU + + # zero the parameter gradients + if not enable_sam: + optimizer.zero_grad() + + # forward + # track grad if only in train! 
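+                # torch.set_grad_enabled is a context manager that switches autograd on or
+                # off for everything inside the block: gradients are only tracked when
+                # phase == 'train', so validation forward passes run without building a
+                # computation graph (equivalent to wrapping the 'val' phase in torch.no_grad()).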
+ with torch.set_grad_enabled(phase == 'train'): + + outputs = model(inputs) # pred outputs of confidence: [B,CLS] + _, preds = torch.max(outputs, 1) # idx outputs: [B] each is a idx + loss = criterion(outputs, labels) # cross entrphy of one-hot outputs: [B,CLS] and idx label [B] + + # backward + optimize only if in training phase + if phase == 'train': + if enable_sam: + loss.backward() + # first forward-backward pass + optimizer.first_step(zero_grad=True) + + # second forward-backward pass + loss2 = criterion(model(inputs), labels) # SAM need another model(inputs) + loss2.backward() # make sure to do a full forward pass when using SAM + optimizer.second_step(zero_grad=True) + else: + loss.backward() + optimizer.step() + + # log criterias: update + log_running_loss += loss.item() + running_loss += loss.item() * inputs.size(0) + running_corrects += torch.sum(preds.cpu() == GT_long_labels.cpu().data) + + # Compute precision and recall for each class. + for cls_idx in range(len(class_names)): + tp = np.dot((GT_long_labels.cpu().data == cls_idx).numpy().astype(int), + (preds == cls_idx).cpu().numpy().astype(int)) + tn = np.dot((GT_long_labels.cpu().data != cls_idx).numpy().astype(int), + (preds != cls_idx).cpu().numpy().astype(int)) + + fp = np.sum((preds == cls_idx).cpu().numpy()) - tp + + fn = np.sum((GT_long_labels.cpu().data == cls_idx).numpy()) - tp + + # log_dict[cls_idx] = {'tp': 0.0, 'tn': 0.0, 'fp': 0.0, 'fn': 0.0} set to float inside + log_dict[class_names[cls_idx]]['tp'] += tp + log_dict[class_names[cls_idx]]['tn'] += tn + log_dict[class_names[cls_idx]]['fp'] += fp + log_dict[class_names[cls_idx]]['fn'] += fn + + # attach the records to the tensorboard backend + if writer is not None: + # ...log the running loss + writer.add_scalar(phase + ' minibatch loss', + float(loss.item()), + epoch * len(dataloaders[phase]) + index) + writer.add_scalar(phase + ' minibatch ACC', + float(torch.sum(preds.cpu() == GT_long_labels.cpu().data) / inputs.size(0)), + epoch * len(dataloaders[phase]) + index) + + # at the checking time now + if index % check_minibatch == check_minibatch - 1: + model_time = time.time() - model_time + + check_index = index // check_minibatch + 1 + + epoch_idx = epoch + 1 + print('Epoch:', epoch_idx, ' ', phase, 'index of ' + str(check_minibatch) + ' minibatch:', + check_index, ' time used:', model_time) + + print('minibatch AVG loss:', float(log_running_loss) / check_minibatch) + + if enable_visualize_check: + visualize_check(inputs, GT_long_labels, model, class_names, num_images=-1, + pic_name='Visual_' + phase + '_E_' + str(epoch_idx) + '_I_' + str(index + 1), + draw_path=draw_path, writer=writer) + + if enable_attention_check: + try: + check_SAA(inputs, GT_long_labels, model, model_idx, edge_size, class_names, num_images=1, + pic_name='GradCAM_' + phase + '_E_' + str(epoch_idx) + '_I_' + str(index + 1), + draw_path=draw_path, writer=writer) + except: + print('model:', model_idx, ' with edge_size', edge_size, 'is not supported yet') + else: + pass + + model_time = time.time() + log_running_loss = 0.0 + + index += 1 + + if phase == 'train': + if scheduler is not None: # lr scheduler: update + scheduler.step() + + # at the last of train/val in each epoch, if no check has been triggered + if check_index == -1: + epoch_idx = epoch + 1 + if enable_visualize_check: + visualize_check(inputs, GT_long_labels, model, class_names, num_images=-1, + pic_name='Visual_' + phase + '_E_' + str(epoch_idx), + draw_path=draw_path, writer=writer) + + if enable_attention_check: + try: + 
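+                        # check_SAA draws GradCAM-style attention maps; only some backbones
+                        # and edge sizes are supported, so failures are caught by the except
+                        # branch below and reported instead of interrupting training.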
check_SAA(inputs, GT_long_labels, model, model_idx, edge_size, class_names, num_images=1, + pic_name='GradCAM_' + phase + '_E_' + str(epoch_idx), + draw_path=draw_path, writer=writer) + except: + print('model:', model_idx, ' with edge_size', edge_size, 'is not supported yet') + else: + pass + + # log criterias: print + epoch_loss = running_loss / dataset_sizes[phase] + epoch_acc = running_corrects.double() / dataset_sizes[phase] * 100 + print('\nEpoch: {} {} \nLoss: {:.4f} Acc: {:.4f}'.format(epoch + 1, phase, epoch_loss, epoch_acc)) + + if phase == 'train' and fix_position_ratio_scheduler is not None \ + and puzzle_patch_size_scheduler is not None: + print('\nEpoch: {}, Fix_position_ratio: {}, Puzzle_patch_size: ' + '{}'.format(epoch + 1, fix_position_ratio, puzzle_patch_size)) + + # attach the records to the tensorboard backend + if writer is not None: + # ...log the running loss + writer.add_scalar(phase + ' loss', + float(epoch_loss), + epoch + 1) + writer.add_scalar(phase + ' ACC', + float(epoch_acc), + epoch + 1) + + # calculating the confusion matrix + for cls_idx in range(len(class_names)): + tp = log_dict[class_names[cls_idx]]['tp'] + tn = log_dict[class_names[cls_idx]]['tn'] + fp = log_dict[class_names[cls_idx]]['fp'] + fn = log_dict[class_names[cls_idx]]['fn'] + tp_plus_fp = tp + fp + tp_plus_fn = tp + fn + fp_plus_tn = fp + tn + fn_plus_tn = fn + tn + + # precision + if tp_plus_fp == 0: + precision = 0 + else: + precision = float(tp) / tp_plus_fp * 100 + # recall + if tp_plus_fn == 0: + recall = 0 + else: + recall = float(tp) / tp_plus_fn * 100 + + # TPR (sensitivity) + TPR = recall + + # TNR (specificity) + # FPR + if fp_plus_tn == 0: + TNR = 0 + FPR = 0 + else: + TNR = tn / fp_plus_tn * 100 + FPR = fp / fp_plus_tn * 100 + + # NPV + if fn_plus_tn == 0: + NPV = 0 + else: + NPV = tn / fn_plus_tn * 100 + + print('{} precision: {:.4f} recall: {:.4f}'.format(class_names[cls_idx], precision, recall)) + print('{} sensitivity: {:.4f} specificity: {:.4f}'.format(class_names[cls_idx], TPR, TNR)) + print('{} FPR: {:.4f} NPV: {:.4f}'.format(class_names[cls_idx], FPR, NPV)) + print('{} TP: {}'.format(class_names[cls_idx], tp)) + print('{} TN: {}'.format(class_names[cls_idx], tn)) + print('{} FP: {}'.format(class_names[cls_idx], fp)) + print('{} FN: {}'.format(class_names[cls_idx], fn)) + # attach the records to the tensorboard backend + if writer is not None: + # ...log the running loss + writer.add_scalar(phase + ' ' + class_names[cls_idx] + ' precision', + precision, + epoch + 1) + writer.add_scalar(phase + ' ' + class_names[cls_idx] + ' recall', + recall, + epoch + 1) + + # json log: update + json_log[str(epoch + 1)][phase] = log_dict + + if phase == 'val': + temp_vac = epoch_acc + else: + temp_acc = epoch_acc # not useful actually + + # deep copy the model + if phase == 'val' and better_performance(temp_acc, temp_vac, best_acc, best_vac) and epoch >= intake_epochs: + # what is better? 
we now use the wildly used method only + best_epoch_idx = epoch + 1 + best_acc = temp_acc + best_vac = temp_vac + best_model_wts = copy.deepcopy(model.state_dict()) + best_log_dic = log_dict + + print('\n') + + print() + + time_elapsed = time.time() - since + print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60)) + print('Best epoch idx: ', best_epoch_idx) + print('Best epoch train Acc: {:4f}'.format(best_acc)) + print('Best epoch val Acc: {:4f}'.format(best_vac)) + for cls_idx in range(len(class_names)): + tp = best_log_dic[class_names[cls_idx]]['tp'] + tn = best_log_dic[class_names[cls_idx]]['tn'] + fp = best_log_dic[class_names[cls_idx]]['fp'] + fn = best_log_dic[class_names[cls_idx]]['fn'] + tp_plus_fp = tp + fp + tp_plus_fn = tp + fn + fp_plus_tn = fp + tn + fn_plus_tn = fn + tn + + # precision + if tp_plus_fp == 0: + precision = 0 + else: + precision = float(tp) / tp_plus_fp * 100 + # recall + if tp_plus_fn == 0: + recall = 0 + else: + recall = float(tp) / tp_plus_fn * 100 + + # TPR (sensitivity) + TPR = recall + + # TNR (specificity) + # FPR + if fp_plus_tn == 0: + TNR = 0 + FPR = 0 + else: + TNR = tn / fp_plus_tn * 100 + FPR = fp / fp_plus_tn * 100 + + # NPV + if fn_plus_tn == 0: + NPV = 0 + else: + NPV = tn / fn_plus_tn * 100 + + print('{} precision: {:.4f} recall: {:.4f}'.format(class_names[cls_idx], precision, recall)) + print('{} sensitivity: {:.4f} specificity: {:.4f}'.format(class_names[cls_idx], TPR, TNR)) + print('{} FPR: {:.4f} NPV: {:.4f}'.format(class_names[cls_idx], FPR, NPV)) + + # attach the records to the tensorboard backend + if writer is not None: + writer.close() + + # load best model weights as final model training result + model.load_state_dict(best_model_wts) + # save json_log indent=2 for better view + json.dump(json_log, open(os.path.join(draw_path, model_idx + '_log.json'), 'w'), ensure_ascii=False, indent=2) + return model + + +def main(args): + if args.paint: + # use Agg kernal, not painting in the front-desk + import matplotlib + matplotlib.use('Agg') + + enable_tensorboard = args.enable_tensorboard # True + enable_attention_check = args.enable_attention_check # False 'CAM' 'SAA' + enable_visualize_check = args.enable_visualize_check # False + + enable_sam = args.enable_sam # False + + data_augmentation_mode = args.data_augmentation_mode # 0 + + linearprobing = args.linearprobing # False + + Pre_Trained_model_path = args.Pre_Trained_model_path # None + Prompt_state_path = args.Prompt_state_path # None + + # Prompt + PromptTuning = args.PromptTuning # None "Deep" / "Shallow" + Prompt_Token_num = args.Prompt_Token_num # 20 + PromptUnFreeze = args.PromptUnFreeze # False + + gpu_idx = args.gpu_idx # GPU idx start with0, -1 to use multipel GPU + + # model info + model_idx = args.model_idx # the model we are going to use. 
by the format of Model_size_other_info + # structural parameter + drop_rate = args.drop_rate + attn_drop_rate = args.attn_drop_rate + drop_path_rate = args.drop_path_rate + use_cls_token = False if args.cls_token_off else True + use_pos_embedding = False if args.pos_embedding_off else True + use_att_module = None if args.att_module == 'None' else args.att_module + + # pretrained_backbone + pretrained_backbone = False if args.backbone_PT_off else True + + # classification required number of your dataset + num_classes = args.num_classes # default 0 for auto-fit + # image size for the input image + edge_size = args.edge_size # 224 384 1000 + + # batch info + batch_size = args.batch_size # 8 + num_workers = args.num_workers # main training num_workers 4 + + num_epochs = args.num_epochs # 50 + intake_epochs = args.intake_epochs # 0 + check_minibatch = args.check_minibatch if args.check_minibatch is not None else 400 // batch_size + + lr = args.lr # 0.000007 + lrf = args.lrf # 0.0 + + opt_name = args.opt_name # 'Adam' + + # PATH info + draw_root = args.draw_root + model_path = args.model_path + dataroot = args.dataroot + + draw_path = os.path.join(draw_root, 'CLS_' + model_idx) # CLS_ is for the CLS training, MIL will be MIL training + save_model_path = os.path.join(model_path, 'CLS_' + model_idx + '.pth') + + if not os.path.exists(model_path): + os.makedirs(model_path) + + if os.path.exists(draw_path): + del_file(draw_path) # fixme clear the output folder, NOTICE this may be DANGEROUS + else: + os.makedirs(draw_path) + + # Train Augmentation + augmentation_name = args.augmentation_name # None + + # Data Augmentation + data_transforms = data_augmentation(data_augmentation_mode, edge_size=edge_size) + + datasets = {x: torchvision.datasets.ImageFolder(os.path.join(dataroot, x), data_transforms[x]) for x in + ['train', 'val']} # 2 dataset obj is prepared here and combine together + dataset_sizes = {x: len(datasets[x]) for x in ['train', 'val']} # size of each dataset + + dataloaders = {'train': torch.utils.data.DataLoader(datasets['train'], batch_size=batch_size, shuffle=True, + num_workers=num_workers, drop_last=True), # colab suggest 2 + 'val': torch.utils.data.DataLoader(datasets['val'], batch_size=batch_size, shuffle=False, + num_workers=num_workers // 4 + 1, drop_last=True) + } + + class_names = [d.name for d in os.scandir(os.path.join(dataroot, 'train')) if d.is_dir()] + class_names.sort() + if num_classes == 0: + print("class_names:", class_names) + num_classes = len(class_names) + else: + if len(class_names) == num_classes: + print("class_names:", class_names) + else: + print('classfication number of the model mismatch the dataset requirement of:', len(class_names)) + return -1 + + print("*********************************{}*************************************".format('setting')) + print(args) + + # start tensorboard backend + if enable_tensorboard: + writer = SummaryWriter(draw_path) + else: + writer = None + # if u run locally + # nohup tensorboard --logdir=/home/MSHT/runs --host=0.0.0.0 --port=7777 & + # tensorboard --logdir=/home/ZTY/runs --host=0.0.0.0 --port=7777 + + if gpu_idx == -1: # use all cards + if torch.cuda.device_count() > 1: + print("Use", torch.cuda.device_count(), "GPUs!") + # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] 
on 3 GPUs + gpu_use = gpu_idx + else: + print('we dont have more GPU idx here, try to use gpu_idx=0') + try: + os.environ['CUDA_VISIBLE_DEVICES'] = '0' # setting k for: only card idx k is sighted for this code + gpu_use = 0 + except: + print("GPU distributing ERRO occur use CPU instead") + gpu_use = 'cpu' + + else: + # Decide which device we want to run on + try: + # setting k for: only card idx k is sighted for this code + os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_idx) + gpu_use = gpu_idx + except: + print('we dont have that GPU idx here, try to use gpu_idx=0') + try: + # setting 0 for: only card idx 0 is sighted for this code + os.environ['CUDA_VISIBLE_DEVICES'] = '0' + gpu_use = 0 + except: + print("GPU distributing ERRO occur use CPU instead") + gpu_use = 'cpu' + + # device environment + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + # get model + if PromptTuning is not None: + print('PromptTuning of ', model_idx) + print('Prompt VPT type:', PromptTuning) + + # initialize the model backbone: + if Pre_Trained_model_path is None or Pre_Trained_model_path == 'timm': + base_state_dict = 'timm' + print('backbone base_state_dict of timm') + elif Pre_Trained_model_path is not None and os.path.exists(Pre_Trained_model_path): + print('backbone base_state_dict at: ', Pre_Trained_model_path) + base_state_dict = torch.load(Pre_Trained_model_path) + else: + print('invalid Pre_Trained_model_path for prompting at: ', Pre_Trained_model_path) + raise + + # put the additional prompt tokens to model: + if Prompt_state_path is None: + prompt_state_dict = None + print('prompting with empty prompt_state: prompt_state of None') + elif Prompt_state_path is not None and os.path.exists(Prompt_state_path): + print('prompting with prompt_state at: ', Prompt_state_path) + prompt_state_dict = torch.load(Prompt_state_path) + else: + print('invalid prompt_state_dict for prompting, path at:', Prompt_state_path) + raise + + model = build_promptmodel(num_classes, edge_size, model_idx, Prompt_Token_num=Prompt_Token_num, + VPT_type=PromptTuning, prompt_state_dict=prompt_state_dict, + base_state_dict=base_state_dict) + # Use FineTuning with prompt tokens (when PromptUnFreeze==True) + if PromptUnFreeze: + model.UnFreeze() + print('prompt tuning with all parameaters un-freezed') + + else: + # get model: randomly initiate model, except the backbone CNN(when pretrained_backbone is True) + model = get_model(num_classes, edge_size, model_idx, drop_rate, attn_drop_rate, drop_path_rate, + pretrained_backbone, use_cls_token, use_pos_embedding, use_att_module) + + # Manually get the model pretrained on the Imagenet1000 + if Pre_Trained_model_path is not None: + if os.path.exists(Pre_Trained_model_path): + state_dict = FixStateDict(torch.load(Pre_Trained_model_path), remove_key_head='head') + model.load_state_dict(state_dict, False) + print('Specified backbone model weight loaded:', Pre_Trained_model_path) + else: + print('Specified Pre_Trained_model_path:' + Pre_Trained_model_path, ' is NOT avaliable!!!!\n') + raise + else: + print('building model (no-prompt) with pretrained_backbone status:',pretrained_backbone) + if pretrained_backbone is True: + print('timm loaded') + + if linearprobing: + # Only tuning the last FC layer for CLS task + module_all = 0 + for child in model.children(): # find all nn.modules + module_all += 1 + + for param in model.parameters(): # freeze all parameters + param.requires_grad = False + + for module_idx, child in enumerate(model.children()): + if module_idx == module_all: # 
Unfreeze the parameters of the last FC layer + for param in child.parameters(): + param.requires_grad = True + + print('GPU:', gpu_use) + + if gpu_use == -1: + model = nn.DataParallel(model) + + model.to(device) + + try: + summary(model, input_size=(3, edge_size, edge_size)) # should be after .to(device) + except: + pass + + print("model :", model_idx) + + # Augmentation + Augmentation = get_online_augmentation(augmentation_name, p=0.5, class_num=num_classes, + batch_size=batch_size, edge_size=edge_size, device=device) + + if augmentation_name != 'CellMix-Split' and augmentation_name != 'CellMix-Group' \ + and augmentation_name != 'CellMix-Random': + fix_position_ratio_scheduler = None + puzzle_patch_size_scheduler = None + else: + # setting puzzle_patch_size and fix_position_ratio schedulers + fix_position_ratio_scheduler = ratio_scheduler(total_epoches=num_epochs, + warmup_epochs=0, + basic_ratio=0.5, + strategy=args.ratio_strategy, # 'linear' + fix_position_ratio=args.fix_position_ratio, + threshold=args.loss_drive_threshold) + + puzzle_patch_size_scheduler = patch_scheduler(total_epoches=num_epochs, + warmup_epochs=0, + edge_size=edge_size, + basic_patch=16, + strategy=args.patch_strategy, # 'random', 'linear' or 'loop' + threshold=args.loss_drive_threshold, + fix_patch_size=args.fix_patch_size, # 16,32,48,64,96,128,192 + patch_size_jump=args.patch_size_jump) # 'odd' or 'even' + + # Default cross entrphy of one-hot outputs: [B,CLS] and idx label [B] long tensor + # augmentation loss is SoftlabelCrossEntropy + criterion = SoftlabelCrossEntropy() \ + if Augmentation is not None and augmentation_name != 'Cutout' else nn.CrossEntropyLoss() + + if opt_name == 'SGD': + optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.8, weight_decay=0.005) + scheduler = lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5) # 15 0.1 default SGD StepLR scheduler + elif opt_name == 'Adam': + optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=0.01) + scheduler = None + else: + print('no optimizer') + raise + + if enable_sam: + from utils.sam import SAM + + if opt_name == 'SGD': + base_optimizer = torch.optim.SGD # define an optimizer for the "sharpness-aware" update + optimizer = SAM(model.parameters(), base_optimizer, lr=lr, momentum=0.8) + scheduler = None + elif opt_name == 'Adam': + base_optimizer = torch.optim.Adam # define an optimizer for the "sharpness-aware" update + optimizer = SAM(model.parameters(), base_optimizer, lr=lr, weight_decay=0.01) + else: + print('no optimizer') + raise + + if lrf > 0: # use cosine learning rate schedule + import math + # cosine Scheduler by https://arxiv.org/pdf/1812.01187.pdf + lf = lambda x: ((1 + math.cos(x * math.pi / num_epochs)) / 2) * (1 - lrf) + lrf # cosine + scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) + + # train + model_ft = train_model(model, dataloaders, criterion, optimizer, class_names, dataset_sizes, + fix_position_ratio_scheduler=fix_position_ratio_scheduler, + puzzle_patch_size_scheduler=puzzle_patch_size_scheduler, + Augmentation=Augmentation, + edge_size=edge_size, model_idx=model_idx, num_epochs=num_epochs, + intake_epochs=intake_epochs, check_minibatch=check_minibatch, + scheduler=scheduler, device=device, draw_path=draw_path, + enable_attention_check=enable_attention_check, + enable_visualize_check=enable_visualize_check, + enable_sam=enable_sam, writer=writer) + + # save model if its a multi-GPU model, save as a single GPU one too + if gpu_use == -1: + + if PromptTuning is None: + 
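+            # nn.DataParallel wraps the network, so its parameters live under model_ft.module;
+            # saving module.state_dict() keeps the checkpoint keys free of the 'module.' prefix,
+            # which lets the same file be loaded later on a single-GPU (non-DataParallel) model.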
torch.save(model_ft.module.state_dict(), save_model_path) + + else: + if PromptUnFreeze: + torch.save(model_ft.module.state_dict(), save_model_path) + else: + prompt_state_dict = model_ft.module.obtain_prompt() + # fixme maybe bug at DP module.obtain_prompt, just model.obtain_prompt is enough + torch.save(prompt_state_dict, save_model_path) + + print('model trained by multi-GPUs has its single GPU copy saved at ', save_model_path) + + else: + if PromptTuning is None: + torch.save(model_ft.state_dict(), save_model_path) + + else: + if PromptUnFreeze: + torch.save(model_ft.state_dict(), save_model_path) + else: + prompt_state_dict = model_ft.obtain_prompt() + torch.save(prompt_state_dict, save_model_path) + + print('model trained by GPU (idx:' + str(gpu_use) + ') has been saved at ', save_model_path) + + +def get_args_parser(): + parser = argparse.ArgumentParser(description='PyTorch ImageNet Training') + + # Model Name or index + parser.add_argument('--model_idx', default='Hybrid2_384_401_testsample', type=str, help='Model Name or index') + # drop_rate, attn_drop_rate, drop_path_rate + parser.add_argument('--drop_rate', default=0.0, type=float, help='dropout rate , default 0.0') + parser.add_argument('--attn_drop_rate', default=0.0, type=float, help='dropout rate Aftter Attention, default 0.0') + parser.add_argument('--drop_path_rate', default=0.0, type=float, help='drop path for stochastic depth, default 0.0') + + # Abalation Studies + parser.add_argument('--cls_token_off', action='store_true', help='use cls_token in model structure') + parser.add_argument('--pos_embedding_off', action='store_true', help='use pos_embedding in model structure') + # 'SimAM', 'CBAM', 'SE' 'None' + parser.add_argument('--att_module', default='SimAM', type=str, help='use which att_module in model structure') + + # backbone_PT_off by default is false, in default setting the backbone weight is required + parser.add_argument('--backbone_PT_off', action='store_true', help='use a freash backbone weight in training') + + # Enviroment parameters + parser.add_argument('--gpu_idx', default=-1, type=int, + help='use a single GPU with its index, -1 to use multiple GPU') + + # Path parameters + parser.add_argument('--dataroot', default='/data/MIL_Experiment/dataset/ROSE_CLS', + help='path to dataset') + parser.add_argument('--model_path', default='/home/pancreatic-cancer-project/saved_models', + help='path to save model state-dict') + parser.add_argument('--draw_root', default='/home/pancreatic-cancer-project/runs', + help='path to draw and save tensorboard output') + + # Help tool parameters + parser.add_argument('--paint', action='store_false', help='paint in front desk') # matplotlib.use('Agg') + + # check tool parameters + parser.add_argument('--enable_tensorboard', action='store_true', help='enable tensorboard to save status') + parser.add_argument('--enable_attention_check', action='store_true', help='check and save attention map') + parser.add_argument('--enable_visualize_check', action='store_true', help='check and save pics') + + # Tuning setting + # PromptTuning + parser.add_argument('--PromptTuning', default=None, type=str, + help='use Prompt Tuning strategy instead of Finetuning') + # Prompt_Token_num + parser.add_argument('--Prompt_Token_num', default=20, type=int, help='Prompt_Token_num') + + # PromptUnFreeze + parser.add_argument('--PromptUnFreeze', action='store_true', help='prompt tuning with all parameaters un-freezed') + + # linearprobing + parser.add_argument('--linearprobing', action='store_true', 
help='use linearprobing tuning') + + # Finetuning a Pretrained model at PATH + # '/home/MIL_Experiment/saved_models/Hybrid2_384_PreTrain_000.pth' + parser.add_argument('--Pre_Trained_model_path', default=None, type=str, + help='Finetuning a trained model in this dataset') + # Prompt_state_path + parser.add_argument('--Prompt_state_path', default=None, type=str, + help='Prompt_state_path for prompt tokens') + + # Training status parameters + # SAM + parser.add_argument('--enable_sam', action='store_true', help='use SAM strategy in training') + + # Online augmentation_name + parser.add_argument('--augmentation_name', default=None, type=str, help='Online augmentation name') + + # CellMix ablation: loss_drive strategy + parser.add_argument('--ratio_strategy', default=None, type=str, help='CellMix ratio scheduler strategy') + parser.add_argument('--patch_strategy', default=None, type=str, help='CellMix patch scheduler strategy') + parser.add_argument('--loss_drive_threshold', default=4.0, type=float, help='CellMix loss_drive_threshold') + + # CellMix ablation: fix_patch_size patch_size_jump + parser.add_argument('--fix_position_ratio', default=0.5, type=float, help='CellMix ratio scheduler strategy') + parser.add_argument('--fix_patch_size', default=None, type=int, help='CellMix ablation using fix_patch_size') + parser.add_argument('--patch_size_jump', default=None, type=str, help='CellMix patch_size_jump strategy') + + # Dataset based parameters + parser.add_argument('--num_classes', default=0, type=int, help='classification number, default 0 for auto-fit') + parser.add_argument('--edge_size', default=384, type=int, help='edge size of input image') # 224 256 384 1000 + # Dataset specific augmentations in dataloader + parser.add_argument('--data_augmentation_mode', default=0, type=int, help='data_augmentation_mode') + + # Training seting parameters + parser.add_argument('--batch_size', default=8, type=int, help='Training batch_size default 8') + parser.add_argument('--num_epochs', default=50, type=int, help='training epochs') + parser.add_argument('--intake_epochs', default=0, type=int, help='only save model at epochs after intake_epochs') + parser.add_argument('--lr', default=0.00001, type=float, help='learing rate') + parser.add_argument('--lrf', type=float, default=0.0, + help='learing rate decay rate, default 0(not enabled), suggest 0.1 and lr=0.00005') + parser.add_argument('--opt_name', default='Adam', type=str, help='optimizer name Adam or SGD') + + # check_minibatch for painting pics + parser.add_argument('--check_minibatch', default=None, type=int, help='check batch_size') + parser.add_argument('--num_workers', default=2, type=int, help='use CPU num_workers , default 2 for colab') + + return parser + + +if __name__ == '__main__': + # setting up the random seed + setup_seed(42) + + parser = get_args_parser() + args = parser.parse_args() + main(args) diff --git a/PuzzleTuning/dataprocessing/CPIA-main/README.md b/PuzzleTuning/dataprocessing/CPIA-main/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ed8802e13c4d350c90e8496a37669123386aa62a --- /dev/null +++ b/PuzzleTuning/dataprocessing/CPIA-main/README.md @@ -0,0 +1,133 @@ +# CPIA_data_process + +The CPIA dataset contains 3,474,406 (the total number is growing as we continue to process the datasets) standardized images, covering over 50 organs/tissues and about 98 kinds of diseases, which includes two main data types: whole slide images (WSIs) and characteristic regions of interest (ROIs). 
+ +In this repo, we provide relevant codes for processing all sub-datasets within the CPIA dataset. + +![image](https://github.com/Desperadodo/CPIA_data_process/assets/87553719/ec9631e0-c398-4711-9eca-b764333ef10b) +*The compositions and WSI processing strategy of the CPIA dataset.* + +![image](https://github.com/Desperadodo/CPIA_data_process/assets/87553719/2f8660a5-429d-4e42-97f5-8bc5eeb4c587) +*The multi-scale strategy and diverse characteristics of the CPIA dataset.* + +## WSI +Each WSI dataset is divided into four levels using one python program. Most of the Whole Slide Imaging (WSI) images are stored in the SVS format, which includes the micron-per-pixel (MPP) information in the header file. Our processing program can automatically identify the MPP of each image and standardize it to 0.4942um/pixel by adjusting the edge length of each patch. Finally, the patch images are divided into four different sizes with edge lengths of 3840, 960, 384, and 96, respectively, and stored in their respective folders. +| Sub-dataset name | Process Code | Suffix | +|-------------------|--------------|--------| +| CAM16 | CPIA_WSI | tif | +| CATCH dataset | CPIA_WSI | svs | +| CMB-CRC | CPIA_WSI | svs | +| CMB-LCA | CPIA_WSI | svs | +| CMB-MEL | CPIA_WSI | svs | +| CPTAC-AML | CPIA_WSI | svs | +| CPTAC-BRCA | CPIA_WSI | svs | +| CPTAC-CCRCC | CPIA_WSI | svs | +| CPTAC-CM | CPIA_WSI | svs | +| CPTAC-COAD | CPIA_WSI | svs | +| CPTAC-HNSCC | CPIA_WSI | svs | +| CPTAC-LSCC | CPIA_WSI | svs | +| CPTAC-LUAD | CPIA_WSI | svs | +| CPTAC-OV | CPIA_WSI | svs | +| CPTAC-PDA | CPIA_WSI | svs | +| CPTAC-SAR | CPIA_WSI | svs | +| CPTAC-UCEC | CPIA_WSI | svs | +| HER2 tumor ROIs | CPIA_WSI | svs | +| MSKCC(SLN-Breast) | CPIA_WSI | svs | +| PAIP2019 | CPIA_WSI | svs | +| PAIP2020 | CPIA_WSI | svs | +| PAIP2021 | CPIA_WSI | svs | +| Post-NAT-BRCA | CPIA_WSI | svs | +| TCGA-ACC | CPIA_WSI | svs | +| TCGA-BLCA | CPIA_WSI | svs | +| TCGA-BRCA | CPIA_WSI | svs | +| TCGA-CESC | CPIA_WSI | svs | +| TCGA-CHOL | CPIA_WSI | svs | +| TCGA-COAD | CPIA_WSI | svs | +| TCGA-DLBC | CPIA_WSI | svs | +| TCGA-ESCA | CPIA_WSI | svs | +| TCGA-GBM | CPIA_WSI | svs | +| TCGA-HNSC | CPIA_WSI | svs | +| TCGA-KICH | CPIA_WSI | svs | +| TCGA-KIRC | CPIA_WSI | svs | +| TCGA-KIRP | CPIA_WSI | svs | +| TCGA-LGG | CPIA_WSI | svs | +| TCGA-LIHC | CPIA_WSI | svs | +| TCGA-LUAD | CPIA_WSI | svs | +| TCGA-LUSC | CPIA_WSI | svs | +| TCGA-MESO | CPIA_WSI | svs | +| TCGA-OV | CPIA_WSI | svs | +| TCGA-PAAD | CPIA_WSI | svs | +| TCGA-PCPG | CPIA_WSI | svs | +| TCGA-PRAD | CPIA_WSI | svs | +| TCGA-READ | CPIA_WSI | svs | +| TCGA-SARC | CPIA_WSI | svs | +| TCGA-SKCM | CPIA_WSI | svs | +| TCGA-STAD | CPIA_WSI | svs | +| TCGA-TGCT | CPIA_WSI | svs | +| TCGA-THCA | CPIA_WSI | svs | +| TCGA-THYM | CPIA_WSI | svs | +| TCGA-UCEC | CPIA_WSI | svs | +| TCGA-UCS | CPIA_WSI | svs | +| TCGA-UVM | CPIA_WSI | svs | + + +## ROI +The processing code for the ROI dataset is related to the structure of the original dataset. After the processing is complete, each dataset folder contains only all images of that dataset, with each image having a dimension of 384x384 pixels, and is stored in jpg format. + +Before processing the data, please ensure that only the target images to be processed are contained in the input path of the program, and enter the correct suffix of the images to be processed (this might require slight adjustments to the original dataset folder). 
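+
+For reference, the core standardization step that the ROI scripts apply to every image can be sketched with PIL alone (a minimal illustration; the function name and file paths below are ours, not part of the repo):
+
+```python
+from PIL import Image
+
+def center_crop_resize(in_path, out_path, size=384):
+    """Center-crop the largest square, then resize it to size x size and save it."""
+    img = Image.open(in_path).convert('RGB')
+    w, h = img.size
+    a = min(w, h)  # edge length of the largest centered square
+    left, top = (w - a) // 2, (h - a) // 2
+    img = img.crop((left, top, left + a, top + a))
+    img.resize((size, size), Image.LANCZOS).save(out_path)
+
+# e.g. center_crop_resize('sample_input.png', 'sample_output.jpg')
+```
+
+The per-dataset entry points and expected input suffixes are listed in the table below.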
+
+| Sub-dataset name | Process Code | Suffix | Add_class |
+|------------------------------------------------------|------------------------------------------------|--------|-----------|
+| ANHIR | CPIA_ROI_1_Crop&Resize | png | FALSE |
+| BCSS | CPIA_ROI_1_Crop&Resize | png | FALSE |
+| AML_Cytomorphology | CPIA_ROI_1_Crop&Resize | tiff | FALSE |
+| BCCD | CPIA_ROI_1_Crop&Resize | jpg | FALSE |
+| Blood_Cell_Images | CPIA_ROI_1_Crop&Resize | jpg | TRUE |
+| BreakHis | CPIA_ROI_0_BreakHis + CPIA_ROI_1_Crop&Resize | jpg | TRUE |
+| Breast_Histopathology_Images | CPIA_ROI_1_Crop&Resize | png | FALSE |
+| BreastCancerCells | CPIA_ROI_1_Crop&Resize | tif | FALSE |
+| BreastPathQ | CPIA_ROI_1_Crop&Resize | tif | FALSE |
+| BreCaHAD | CPIA_ROI_1_Crop&Resize | tif | FALSE |
+| Chaoyang | CPIA_ROI_1_Crop&Resize | jpg | FALSE |
+| Colorectal Histology MNIST | CPIA_ROI_1_Crop&Resize | tif | FALSE |
+| CoNSeP | CPIA_ROI_1_Crop&Resize | png | FALSE |
+| CPM_15 | CPIA_ROI_1_Crop&Resize | png | FALSE |
+| CPM_17 | CPIA_ROI_1_Crop&Resize | png | TRUE |
+| CRAG | CPIA_ROI_1_Crop&Resize | jpg | FALSE |
+| CRChistophenotypes | CPIA_ROI_1_Crop&Resize | bmp | FALSE |
+| CRC-TP | CPIA_ROI_1_Crop&Resize | png | TRUE |
+| CRC-VAL-HE-7K | CPIA_ROI_1_Crop&Resize | jpg | FALSE |
+| CryoNuSeg | CPIA_ROI_1_Crop&Resize | tif | FALSE |
+| DataBiox | CPIA_ROI_1_MicroScope DataBiox | jpg | FALSE |
+| GasHisSDB | CPIA_ROI_0_GasHisSDB + CPIA_ROI_1_Crop&Resize | jpg | FALSE |
+| Gastrointestinal_Cancer | CPIA_ROI_1_Crop&Resize | jpg | FALSE |
+| Gleason 2019 | CPIA_ROI_1_Crop&Resize | jpg | FALSE |
+| HuBMAP+HBA_512 | CPIA_ROI_1_Crop&Resize | png | FALSE |
+| Kumar(MoNuSeg) | CPIA_ROI_1_Crop&Resize | tif | FALSE |
+| LISC | CPIA_ROI_1_Crop&Resize | bmp | FALSE |
+| LizardDataset | CPIA_ROI_1_Crop&Resize | png | FALSE |
+| Lung_and_Colon_Cancer_Histopathological_Images_Colon | CPIA_ROI_1_Crop&Resize | jpg | TRUE |
+| Lung_and_Colon_Cancer_Histopathological_Images_Lung | CPIA_ROI_1_Crop&Resize | jpg | TRUE |
+| LYON19 | CPIA_ROI_1_Crop&Resize | png | FALSE |
+| Malignant_Lymphoma_Dataset | CPIA_ROI_1_Crop&Resize | tif | FALSE |
+| MARS | CPIA_ROI_1_Crop&Resize | png | FALSE |
+| MHIST | CPIA_ROI_1_Crop&Resize | png | FALSE |
+| MoNuSAC | CPIA_ROI_1_Crop&Resize | tif | FALSE |
+| Monuseg_train&test | CPIA_ROI_1_Crop&Resize | png | FALSE |
+| Naylor et al, IEEE TMI 2019 | CPIA_ROI_1_Crop&Resize | png | FALSE |
+| NCT-CRC-HE-100K | CPIA_ROI_1_Crop&Resize | tif | FALSE |
+| Osteosarcoma_Tumor_Assessment | CPIA_ROI_1_Crop&Resize | jpg | FALSE |
+| P_falciparum | CPIA_ROI_1_MicroScope P.falciparum | jpg | FALSE |
+| P_uninfected | CPIA_ROI_1_MicroScope P.uninfected | jpg | FALSE |
+| P_vivax | CPIA_ROI_1_MicroScope P.vivax | jpg | FALSE |
+| PCam | CPIA_ROI_1_Crop&Resize | tif | FALSE |
+| SICAPv2 | CPIA_ROI_1_Crop&Resize | jpg | FALSE |
+| SIPaKMeD | CPIA_ROI_0_SIPaKMeD + CPIA_ROI_1_Crop&Resize | jpg | FALSE |
+| warwick_CLS | CPIA_ROI_0_Warwick + CPIA_ROI_1_Crop&Resize | jpg | FALSE |
+| WBC | CPIA_ROI_1_Crop&Resize | jpg | TRUE |
+| Weinart et al, Scientific Reports 2012 | CPIA_ROI_1_Crop&Resize | jpg | FALSE |
+| WSSS4LUAD | CPIA_ROI_1_Crop&Resize | png | TRUE |
+
+
diff --git a/PuzzleTuning/dataprocessing/CPIA-main/ROI/CPIA_ROI_0_BreakHis.py b/PuzzleTuning/dataprocessing/CPIA-main/ROI/CPIA_ROI_0_BreakHis.py
new file mode 100644
index 0000000000000000000000000000000000000000..472eac90aab23ae1e31b7c139dbf193851d8a1b9
--- /dev/null
+++ b/PuzzleTuning/dataprocessing/CPIA-main/ROI/CPIA_ROI_0_BreakHis.py
@@ -0,0 +1,120 @@
+"""
+CPIA_ROI_0_BreakHis.py ver 23.6.9
+This code splits images taken at different magnifications into separate folders,
+and also sorts the different tumor classes into their own folders.
+"""
+import argparse
+import os
+
+from PIL import Image
+from tqdm import tqdm
+
+
+def get_args_parser():
+    parser = argparse.ArgumentParser('CPIA dataset ROI part BreakHis dataset pre-processing', add_help=False)
+    parser.add_argument('--input_root', default='..', type=str,
+                        help='The root that contains the original images. Please make sure there are no unwanted '
+                             'images with the corresponding suffix under the same root')
+    parser.add_argument('--output_root', default='..', type=str,
+                        help='The root for the resized and cropped output images. If the root is not provided, this '
+                             'program will automatically make an output path')
+    return parser
+
+
+def make_and_clear_path(file_pack_path):
+    if not os.path.exists(file_pack_path):
+        os.makedirs(file_pack_path)
+
+
+def find_all_files(root, suffix=None):
+    """
+    Return a list of file paths ending with the given suffix
+    """
+    res = []
+    for root, _, files in os.walk(root):
+        for f in files:
+            if suffix is not None and not f.endswith(suffix):
+                continue
+            res.append(os.path.join(root, f))
+    print(res)
+    print(len(res))
+    return res
+
+
+def save_file(f_image, save_dir, suffix='.jpg'):
+    filepath, _ = os.path.split(save_dir)
+    if not os.path.exists(filepath):
+        os.makedirs(filepath)
+    f_image.save(save_dir + suffix)
+
+
+def pc_to_stander(root_from, root_to):
+    make_and_clear_path(root_to)
+
+    f_dir_list = find_all_files(root=root_from, suffix=".png")
+    name_dict = {}
+
+    for seq in tqdm(range(len(f_dir_list))):
+        f_dir = f_dir_list[seq]
+        _, fname = os.path.split(f_dir)  # renamed to avoid shadowing the built-ins str/type
+        mp = fname.split("-")[-2]  # magnification, e.g. '40', '100', '200', '400'
+        cls_code = (fname.split("_")[2]).split("-")[0]  # class code, e.g. 'DC', 'LC'
+        name = fname.split(".")[0]
+
+        f_img = Image.open(f_dir)
+
+        # map the magnification ('40' / '100' / '200', otherwise '400') to a sub-folder
+        mag_folder = mp if mp in ('40', '100', '200') else '400'
+        # map the class code in the file name to a readable class folder
+        class_folders = {'DC': 'ductal_carcinoma', 'LC': 'lobular_carcinoma',
+                         'MC': 'mucinous_carcinoma', 'PC': 'papillary_carcinoma',
+                         'A': 'adenosis', 'F': 'fibroadenoma', 'PT': 'phyllodes_tumor'}
+        root_target = os.path.join(root_to, mag_folder, class_folders.get(cls_code, 'tubular_adenoma'))
+
+        save_dir = os.path.join(root_target, name)
+        name_dict[save_dir] = f_dir
+
+        save_file(f_img, save_dir)
+
+
+if __name__ == '__main__':
+    args = get_args_parser()
+    args = args.parse_args()
+
+    input_root = args.input_root
+    output_root = args.output_root
+
+    pc_to_stander(input_root, output_root)
diff --git a/PuzzleTuning/dataprocessing/CPIA-main/ROI/CPIA_ROI_0_GasHisSDB.py b/PuzzleTuning/dataprocessing/CPIA-main/ROI/CPIA_ROI_0_GasHisSDB.py
new file mode 100644
index 0000000000000000000000000000000000000000..79edb18cec99a0f39c6d21fccb82440961ad4ae7
--- /dev/null
+++
b/PuzzleTuning/dataprocessing/CPIA-main/ROI/CPIA_ROI_0_GasHisSDB.py @@ -0,0 +1,131 @@ +""" +CPIA_ROI_0_GasHisSDB.py ver 23.6.9 +This code aims to split images of different zooming size into different folders, this code also puts different classes +into different folders +""" + +import argparse +import os +import re +import csv +import shutil +import pandas as pd +from PIL import Image +from tqdm import tqdm +import torchvision.transforms + + +def get_args_parser(): + parser = argparse.ArgumentParser('CPIA dataset ROI part Warwick_QU dataset pre-processing', add_help=False) + parser.add_argument('--input_root', default='..', type=str, + help='The root that contains the orginal images. Please make sure that there is no unwanted ' + 'images with corresponding suffix under the same root') + parser.add_argument('--output_root', default='..', type=str, + help='The root for the resized and cropped output images. If the root is not provided, this ' + 'program will automatically make an output path') + return parser + + +def del_file(filepath): + """ + Delete all files and folders in one directory + :param filepath: file path + :return: + """ + del_list = os.listdir(filepath) + for f in del_list: + file_path = os.path.join(filepath, f) + if os.path.isfile(file_path): + os.remove(file_path) + elif os.path.isdir(file_path): + shutil.rmtree(file_path) + + +def make_and_clear_path(file_pack_path): + if not os.path.exists(file_pack_path): + os.makedirs(file_pack_path) + + +def find_all_files(root, suffix=None): + """ + Return a list of file paths ended with specific suffix + """ + res = [] + for root, _, files in os.walk(root): + for f in files: + if suffix is not None and not f.endswith(suffix): + continue + res.append(os.path.join(root, f)) + print(files) + return res + + +def save_file(f_image, save_dir, suffix='.jpg'): + + filepath, _ = os.path.split(save_dir) + if not os.path.exists(filepath): + os.makedirs(filepath) + f_image.save(save_dir + suffix) + + +def pc_to_stander(root_from, root_to): + root_target = root_to + make_and_clear_path(root_target) + + f_dir_list = find_all_files(root=root_from, suffix=".png") + print(f_dir_list) + name_dict = {} + + for seq in tqdm(range(len(f_dir_list))): + f_dir = f_dir_list[seq] + f_img = Image.open(f_dir) + _, img_name = os.path.split(f_dir) + name = img_name.split('.')[0] + + if '80' in f_dir: + root_target = os.path.join(root_to, '80') + elif '120' in f_dir: + root_target = os.path.join(root_to, '120') + else: + root_target = os.path.join(root_to, '160') + + if 'Normal' in f_dir: + save_dir = os.path.join(root_target, 'Normal') + else: + save_dir = os.path.join(root_target, 'Abnormal') + + """if img_name == result[i][0]: + root_target = os.path.join(root_to, result[i][1]) + if result[i][1] == 0: + i1 += 1 + save_dir = os.path.join(root_target, str(i1)) + elif result[i][1] == 1: + i2 += 1 + save_dir = os.path.join(root_target, str(i2)) + else: + i3 += 1 + save_dir = os.path.join(root_target, str(i3)) + break + else: + continue""" + save_dir = os.path.join(save_dir, name) + + name_dict[save_dir] = f_dir + + save_file(f_img, save_dir) + + root_target, _ = os.path.split(root_to) + root_target, _ = os.path.split(root_target) + pd.DataFrame.from_dict(name_dict, orient='index', columns=['origin path']).to_csv( + os.path.join(root_target, 'name_dict_gashis.csv') + ) + + +if __name__ == '__main__': + args = get_args_parser() + args = args.parse_args() + + input_root = args.input_root + output_root = args.output_root + + pc_to_stander(input_root, output_root) \ No 
newline at end of file diff --git a/PuzzleTuning/dataprocessing/CPIA-main/ROI/CPIA_ROI_0_SIPaKMeD.py b/PuzzleTuning/dataprocessing/CPIA-main/ROI/CPIA_ROI_0_SIPaKMeD.py new file mode 100644 index 0000000000000000000000000000000000000000..32ac18d3f8bfc9754957504980f9fe8bef056db1 --- /dev/null +++ b/PuzzleTuning/dataprocessing/CPIA-main/ROI/CPIA_ROI_0_SIPaKMeD.py @@ -0,0 +1,142 @@ +""" +CPIA_ROI_0_SIPaKMeD.py +This code aims to split test/train pictures and img/annotation pictures into different folders +The original SIPaKMeD dataset has a CROPPED part inside the folder of each class. This code splits +the original view images and cropped images into two folders. +""" +import argparse +import os +import re +import csv +import shutil +import pandas as pd +from PIL import Image +from tqdm import tqdm +import torchvision.transforms + + +def get_args_parser(): + parser = argparse.ArgumentParser('CPIA dataset ROI part Warwick_QU dataset pre-processing', add_help=False) + parser.add_argument('--input_root', default='..', type=str, + help='The root that contains the orginal images. Please make sure that there is no unwanted ' + 'images with corresponding suffix under the same root') + parser.add_argument('--output_root', default='..', type=str, + help='The root for the resized and cropped output images. If the root is not provided, this ' + 'program will automatically make an output path') + return parser + + +def del_file(filepath): + """ + Delete all files and folders in one directory + :param filepath: file path + :return: + """ + del_list = os.listdir(filepath) + for f in del_list: + file_path = os.path.join(filepath, f) + if os.path.isfile(file_path): + os.remove(file_path) + elif os.path.isdir(file_path): + shutil.rmtree(file_path) + + +def make_and_clear_path(file_pack_path): + if not os.path.exists(file_pack_path): + os.makedirs(file_pack_path) + # del_file(file_pack_path) + + +def find_all_files(root, suffix=None): + """ + Return a list of file paths ended with specific suffix + """ + res = [] + for root, _, files in os.walk(root): + for f in files: + if suffix is not None and not f.endswith(suffix): + continue + res.append(os.path.join(root, f)) + return res + + +def save_file(f_image, save_dir, suffix='.jpg'): + filepath, _ = os.path.split(save_dir) + if not os.path.exists(filepath): + os.makedirs(filepath) + f_image.save(save_dir + suffix) + + +def pc_to_stander(root_from, root_to): + root_target = root_to + make_and_clear_path(root_target) + + f_dir_list = find_all_files(root=root_from, suffix=".bmp") + print(f_dir_list) + name_dict = {} + i1 = 0 + i2 = 0 + i3 = 0 + i4 = 0 + i5 = 0 + + for seq in tqdm(range(len(f_dir_list))): + f_dir = f_dir_list[seq] + + f_img = Image.open(f_dir) + + + if 'im_Superficial-Intermediate' in f_dir: + if 'CROPPED' in f_dir: + root_target = os.path.join(root_to, 'Cropped') + else: + root_target = os.path.join(root_to, "Full") + root_target = os.path.join(root_target, 'Sup_Intermediate') + i1 += 1 + save_dir = os.path.join(root_target, str(i1)) + elif 'im_Parabasal' in f_dir: + if 'CROPPED' in f_dir: + root_target = os.path.join(root_to, 'Cropped') + else: + root_target = os.path.join(root_to, "Full") + root_target = os.path.join(root_target, 'Parabasal') + i2 += 1 + save_dir = os.path.join(root_target, str(i2)) + elif 'im_Dyskeratotic' in f_dir: + if 'CROPPED' in f_dir: + root_target = os.path.join(root_to, 'Cropped') + else: + root_target = os.path.join(root_to, "Full") + root_target = os.path.join(root_target, 'Dyskeratotic') + i3 += 1 + save_dir = 
os.path.join(root_target, str(i3)) + elif 'im_Koilocytotic' in f_dir: + if 'CROPPED' in f_dir: + root_target = os.path.join(root_to, 'Cropped') + else: + root_target = os.path.join(root_to, "Full") + root_target = os.path.join(root_target, 'Koilocytotic') + i4 += 1 + save_dir = os.path.join(root_target, str(i4)) + else: + if 'CROPPED' in f_dir: + root_target = os.path.join(root_to, 'Cropped') + else: + root_target = os.path.join(root_to, "Full") + root_target = os.path.join(root_target, 'Metaplastic') + i5 += 1 + save_dir = os.path.join(root_target, str(i5)) + + name_dict[save_dir] = f_dir + + save_file(f_img, save_dir) + + +if __name__ == '__main__': + args = get_args_parser() + args = args.parse_args() + + input_root = args.input_root + output_root = args.output_root + + pc_to_stander(input_root, output_root) \ No newline at end of file diff --git a/PuzzleTuning/dataprocessing/CPIA-main/ROI/CPIA_ROI_0_Warwick.py b/PuzzleTuning/dataprocessing/CPIA-main/ROI/CPIA_ROI_0_Warwick.py new file mode 100644 index 0000000000000000000000000000000000000000..2b21d305850b49750299f977474706d8414e4ef9 --- /dev/null +++ b/PuzzleTuning/dataprocessing/CPIA-main/ROI/CPIA_ROI_0_Warwick.py @@ -0,0 +1,127 @@ +""" +CPIA_ROI_0_Warwick.py ver 23.6.9 +This code aims to split test/train pictures and img/annotation pictures into different folders +""" +import argparse +import os +import csv +import shutil +import pandas as pd +from PIL import Image +from tqdm import tqdm + + +def get_args_parser(): + parser = argparse.ArgumentParser('CPIA dataset ROI part Warwick_QU dataset pre-processing', add_help=False) + parser.add_argument('--input_root', default='..', type=str, + help='The root that contains the orginal images. Please make sure that there is no unwanted ' + 'images with corresponding suffix under the same root') + parser.add_argument('--output_root', default='..', type=str, + help='The root for the resized and cropped output images. 
If the root is not provided, this ' + 'program will automatically make an output path') + return parser + +def del_file(filepath): + """ + Delete all files and folders in one directory + :param filepath: file path + :return: + """ + del_list = os.listdir(filepath) + for f in del_list: + file_path = os.path.join(filepath, f) + if os.path.isfile(file_path): + os.remove(file_path) + elif os.path.isdir(file_path): + shutil.rmtree(file_path) + + +def make_and_clear_path(file_pack_path): + if not os.path.exists(file_pack_path): + os.makedirs(file_pack_path) + # del_file(file_pack_path) + + +def find_all_files(root, suffix=None): + """ + Return a list of file paths ended with specific suffix + """ + res = [] + for root, _, files in os.walk(root): + for f in files: + if suffix is not None and not f.endswith(suffix): + continue + res.append(os.path.join(root, f)) + print(files) + return res + + +def save_file(f_image, save_dir, suffix='.jpg'): + filepath, _ = os.path.split(save_dir) + if not os.path.exists(filepath): + os.makedirs(filepath) + f_image.save(save_dir + suffix) + + +def pc_to_stander(root_from, root_to): + root_target = root_to + make_and_clear_path(root_target) + + f_dir_list = find_all_files(root=root_from, suffix=".bmp") + print(f_dir_list) + name_dict = {} + + with open(r'E:\A_bunch_of_data\Raw\Warwick QU Dataset (Released 2016_07_08)\Grade.csv', 'r') as f: + reader = csv.reader(f) + result = list(reader) + length = len(result) + + for seq in tqdm(range(len(f_dir_list))): + f_dir = f_dir_list[seq] + f_img = Image.open(f_dir) + _, img_name = os.path.split(f_dir) + img_name = img_name.split('.')[0] + + if img_name[0:4] == 'test': + save_dir = os.path.join(root_target, 'test') + else: + save_dir = os.path.join(root_target, 'train') + + if img_name[-4:] == 'anno': + save_dir = os.path.join(save_dir, 'mask') + img_name = img_name[0:-5] + else: + save_dir = os.path.join(save_dir, 'data') + + for i in range(length): + # print(img_name.split('.')[0]) + l = len(result[i][0]) + if img_name == result[i][0]: + if result[i][2] == ' malignant': + save_dir = os.path.join(save_dir, 'malignant') + print('1') + else: + save_dir = os.path.join(save_dir, 'benign') + break + else: + continue + save_dir = os.path.join(save_dir, img_name.split('.')[0]) + name_dict[save_dir] = f_dir + save_file(f_img, save_dir) + + root_target, _ = os.path.split(root_to) + root_target, _ = os.path.split(root_target) + pd.DataFrame.from_dict(name_dict, orient='index', columns=['origin path']).to_csv( + os.path.join(root_target, 'name_dict_warwick.csv') + ) + + +if __name__ == '__main__': + args = get_args_parser() + args = args.parse_args() + + input_root = args.input_root + output_root = args.output_root + + pc_to_stander(input_root, output_root) + diff --git a/PuzzleTuning/dataprocessing/CPIA-main/ROI/CPIA_ROI_1_Crop&Resize.py b/PuzzleTuning/dataprocessing/CPIA-main/ROI/CPIA_ROI_1_Crop&Resize.py new file mode 100644 index 0000000000000000000000000000000000000000..78aee4185a083d0c59e73af2dc875686d8cea86e --- /dev/null +++ b/PuzzleTuning/dataprocessing/CPIA-main/ROI/CPIA_ROI_1_Crop&Resize.py @@ -0,0 +1,112 @@ +""" +CPIA_ROI_1_Crop&Resize.py ver 23.6.8 +This code aims to crop each ROI image by the largest center square, and resize the square image into 384*384 +""" +import argparse +import os +import PIL.Image as Image +from PIL import ImageFile + +ImageFile.LOAD_TRUNCATED_IMAGES = True +Image.MAX_IMAGE_PIXELS = None + + +def get_args_parser(): + parser = argparse.ArgumentParser('CPIA dataset ROI part image cropping and 
resizing', add_help=False) + parser.add_argument('--input_root', default='..', type=str, + help='The root that contains the orginal images. Please make sure that there is no unwanted ' + 'images with corresponding suffix under the same root') + parser.add_argument('--output_root', default=None, type=str, + help='The root for the resized and cropped output images. If the root is not provided, this ' + 'program will automatically make an output path') + parser.add_argument('--suffix', default='jpg', type=str, + help='The suffix of the input image') + parser.add_argument('--size', default=384, type=int, + help='The size of the output image') + parser.add_argument('--add_class', default=False, type=bool, + help='Add class information to the image name.') + + return parser + + +def save_file(f_image, save_dir, suffix='.jpg'): + """ + Save images with designated suffix + """ + f_image = f_image.convert('RGB') + filepath, _ = os.path.split(save_dir) + if not os.path.exists(filepath): + os.makedirs(filepath) + f_image.save(save_dir + suffix) + + +def make_path(file_pack_path): + if not os.path.exists(file_pack_path): + os.makedirs(file_pack_path) + + +def find_all_files(root, suffix): + """ + Return a list of file paths ended with specific suffix + """ + res = [] + for root, _, files in os.walk(root): + for f in files: + if suffix is not None and not f.endswith(suffix): + continue + res.append(os.path.join(root, f)) + print(files) + return res + + +def center_crop(img_size): + """ + Return the cropping zone of a non-square image + :param img_size: img.size + :return: list that contains the cropping zone + """ + width, height = img_size # Get dimensions + a = min(width, height) + left = int((width - a) / 2) + top = int((height - a) / 2) + right = left + a + bottom = top + a + + return [left, top, right, bottom] + + +def data_crop_resize(class_root, output_root, suffix, size=384, add_class=False): + all_data = find_all_files(class_root, suffix) + for data_root in all_data: + if data_root.endswith('.txt'): + continue + elif data_root.endswith('.DS_Store'): + continue + elif output_root is None: + new_data_root = (data_root + '_Lite').split('.')[0] + # specially made for GS dataset: + """new_data_root = (data_root + '_Lite').replace('.', '_') + new_data_root = new_data_root.replace('_jpg', '')""" + else: + data_name_without_suffix = os.path.split(data_root)[1].split('.')[0] + if add_class: + class_name = os.path.split(data_root.split('.')[0])[1] + data_name_without_suffix = class_name + '_' + data_name_without_suffix + new_data_root = os.path.join(output_root, data_name_without_suffix) + + img = Image.open(data_root) + img = img.crop(center_crop(img.size)) + resized_img = img.resize((int(size), int(size)), Image.ANTIALIAS) + save_file(resized_img, new_data_root) + + +if __name__ == '__main__': + args = get_args_parser() + args = args.parse_args() + + input_root = args.input_root + output_root = args.output_root + suffix = args.suffix + size = args.size + add_class = args.add_class + data_crop_resize(input_root, output_root, suffix, size, add_class) diff --git a/PuzzleTuning/dataprocessing/CPIA-main/ROI/CPIA_ROI_1_Microscope_DataBiox.py b/PuzzleTuning/dataprocessing/CPIA-main/ROI/CPIA_ROI_1_Microscope_DataBiox.py new file mode 100644 index 0000000000000000000000000000000000000000..1112b6dca32fcec9295aa6ea2b649fc44472f632 --- /dev/null +++ b/PuzzleTuning/dataprocessing/CPIA-main/ROI/CPIA_ROI_1_Microscope_DataBiox.py @@ -0,0 +1,99 @@ +""" +CPIA_ROI_1_Microscope_DataBiox.py ver 23.6.8 +This code aims to 
crop each Microscope image by the largest center square, keeping the black surroundings out of the
+final image.
+Also, all the images will be resized into 384*384 (or other size you want).
+"""
+import argparse
+import os
+import PIL.Image as Image
+from PIL import ImageFile
+
+ImageFile.LOAD_TRUNCATED_IMAGES = True
+Image.MAX_IMAGE_PIXELS = None
+
+
+def get_args_parser():
+    parser = argparse.ArgumentParser('CPIA dataset ROI part image cropping and resizing', add_help=False)
+    parser.add_argument('--input_root', default='..', type=str,
+                        help='The root that contains the original images. Please make sure that there are no '
+                             'unwanted images with the corresponding suffix under this root')
+    parser.add_argument('--output_root', default=None, type=str,
+                        help='The root for the resized and cropped output images. If the root is not provided, this '
+                             'program will automatically make an output path')
+    parser.add_argument('--suffix', default='jpg', type=str,
+                        help='The suffix of the input image')
+    parser.add_argument('--size', default=384, type=int,
+                        help='The size of the output image')
+    return parser
+
+
+def save_file(f_image, save_dir, suffix='.jpg'):
+    """
+    Save images with designated suffix
+    """
+    f_image = f_image.convert('RGB')
+    filepath, _ = os.path.split(save_dir)
+    if not os.path.exists(filepath):
+        os.makedirs(filepath)
+    f_image.save(save_dir + suffix)
+
+
+def make_path(file_pack_path):
+    if not os.path.exists(file_pack_path):
+        os.makedirs(file_pack_path)
+
+
+def find_all_files(root, suffix):
+    """
+    Return a list of file paths ending with the given suffix
+    """
+    res = []
+    for root, _, files in os.walk(root):
+        for f in files:
+            if suffix is not None and not f.endswith(suffix):
+                continue
+            res.append(os.path.join(root, f))
+        print(files)
+    return res
+
+
+def data_crop_resize(class_root, output_root, suffix, size=384):
+    all_data = find_all_files(class_root, suffix)
+    for data_root in all_data:
+        if data_root.endswith('.txt'):
+            continue
+        elif data_root.endswith('.DS_Store'):
+            continue
+        elif output_root is None:
+            new_data_root = (data_root + '_Lite').split('.')[0]
+        else:
+            data_name_without_suffix = os.path.split(data_root)[1].split('.')[0]
+            new_data_root = os.path.join(output_root, data_name_without_suffix)
+
+        img = Image.open(data_root)
+        width, height = img.size  # Get size
+
+        # the cropping parameters are tuned for DataBiox
+        s = min(width, height)
+        a = int((s * 5) / 7)
+        left = int((width - a) / 2)
+        top = int((height - a) / 2)
+        right = left + a
+        bottom = top + a
+
+        # Crop the center of the image
+        img = img.crop([left, top, right, bottom])
+        resized_img = img.resize((int(size), int(size)), Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
+        save_file(resized_img, new_data_root)
+
+
+if __name__ == '__main__':
+    args = get_args_parser()
+    args = args.parse_args()
+
+    input_root = args.input_root
+    output_root = args.output_root
+    suffix = args.suffix
+    size = args.size
+    data_crop_resize(input_root, output_root, suffix, size)
diff --git a/PuzzleTuning/dataprocessing/CPIA-main/ROI/CPIA_ROI_1_Microscope_P_falciparum.py b/PuzzleTuning/dataprocessing/CPIA-main/ROI/CPIA_ROI_1_Microscope_P_falciparum.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e9309013c4ea82a4fc5ef5b2b8a13a640905561
--- /dev/null
+++ b/PuzzleTuning/dataprocessing/CPIA-main/ROI/CPIA_ROI_1_Microscope_P_falciparum.py
@@ -0,0 +1,98 @@
+"""
+CPIA_ROI_1_Microscope_P_falciparum.py ver 23.6.8
+This code aims to crop each Microscope image by the largest center square, keeping the black surroundings out of 
the +final image +Also, all the images will be resized into 384*384 (or other size you want). +""" +import argparse +import os +import PIL.Image as Image +from PIL import ImageFile +ImageFile.LOAD_TRUNCATED_IMAGES = True +Image.MAX_IMAGE_PIXELS = None + + +def get_args_parser(): + parser = argparse.ArgumentParser('CPIA dataset ROI part image cropping and resizing', add_help=False) + parser.add_argument('--input_root', default='..', type=str, + help='The root that contains the orginal images. Please make sure that there is no unwanted ' + 'images with corresponding suffix under the same root') + parser.add_argument('--output_root', default=None, type=str, + help='The root for the resized and cropped output images. If the root is not provided, this ' + 'program will automatically make an output path') + parser.add_argument('--suffix', default='jpg', type=str, + help='The suffix of the input image') + parser.add_argument('--size', default=384, type=int, + help='The size of the output image') + return argparse + + +def save_file(f_image, save_dir, suffix='.jpg'): + """ + Save images with designated suffix + """ + f_image = f_image.convert('RGB') + filepath, _ = os.path.split(save_dir) + if not os.path.exists(filepath): + os.makedirs(filepath) + f_image.save(save_dir + suffix) + + +def make_path(file_pack_path): + if not os.path.exists(file_pack_path): + os.makedirs(file_pack_path) + + +def find_all_files(root, suffix): + """ + Return a list of file paths ended with specific suffix + """ + res = [] + for root, _, files in os.walk(root): + for f in files: + if suffix is not None and not f.endswith(suffix): + continue + res.append(os.path.join(root, f)) + print(files) + return res + + +def data_crop_resize(class_root, output_root, suffix, size=384): + all_data = find_all_files(class_root, suffix) + for data_root in all_data: + if data_root.endswith('.txt'): + continue + elif data_root.endswith('.DS_Store'): + continue + elif output_root is None: + new_data_root = (data_root + '_Lite').split('.')[0] + else: + data_name_without_suffix = os.path.split(data_root)[1].split('.')[0] + new_data_root = os.path.join(output_root, data_name_without_suffix) + + img = Image.open(data_root) + width, height = img.size # Get size + + # the cropping parameters are tuned for P_falciparum + s = min(width, height) + a = int((s * 5) / 7) + left = int((width - a) / 2) + top = int((height - a) / 2) + right = left + a + bottom = top + a + + # Crop the center of the image + img = img.crop([left, top, right, bottom]) + resized_img = img.resize((int(size), int(size)), Image.ANTIALIAS) + save_file(resized_img, new_data_root) + + +if __name__ == '__main__': + args = get_args_parser() + args = args.parse_args() + + input_root = args.input_root + output_root = args.output_root + suffix = args.suffix + size = args.size + data_crop_resize(input_root, output_root, suffix, size) \ No newline at end of file diff --git a/PuzzleTuning/dataprocessing/CPIA-main/ROI/CPIA_ROI_1_Microscope_P_uninfected.py b/PuzzleTuning/dataprocessing/CPIA-main/ROI/CPIA_ROI_1_Microscope_P_uninfected.py new file mode 100644 index 0000000000000000000000000000000000000000..372a7739d8152d750f24902396178f25a227ecde --- /dev/null +++ b/PuzzleTuning/dataprocessing/CPIA-main/ROI/CPIA_ROI_1_Microscope_P_uninfected.py @@ -0,0 +1,101 @@ +""" +CPIA_ROI_1_Microscope_P_uninfected.py ver 23.6.8 +This code aims to crop each Microscope image by the largest center square, keeping the black surroundings out of the +final image. 
+Also, all the images will be resized into 384*384 (or other size you want). +""" +import argparse +import os +import PIL.Image as Image +from PIL import ImageFile + +ImageFile.LOAD_TRUNCATED_IMAGES = True +Image.MAX_IMAGE_PIXELS = None + + +def get_args_parser(): + parser = argparse.ArgumentParser('CPIA dataset ROI part image cropping and resizing', add_help=False) + parser.add_argument('--input_root', default='..', type=str, + help='The root that contains the orginal images. Please make sure that there is no unwanted ' + 'images with corresponding suffix under the same root') + parser.add_argument('--output_root', default=None, type=str, + help='The root for the resized and cropped output images. If the root is not provided, this ' + 'program will automatically make an output path') + parser.add_argument('--suffix', default='jpg', type=str, + help='The suffix of the input image') + parser.add_argument('--size', default=384, type=int, + help='The size of the output image') + return argparse + + +def save_file(f_image, save_dir, suffix='.jpg'): + """ + Save images with designated suffix + """ + f_image = f_image.convert('RGB') + filepath, _ = os.path.split(save_dir) + if not os.path.exists(filepath): + os.makedirs(filepath) + f_image.save(save_dir + suffix) + + +def make_path(file_pack_path): + if not os.path.exists(file_pack_path): + os.makedirs(file_pack_path) + + +def find_all_files(root, suffix): + """ + Return a list of file paths ended with specific suffix + """ + res = [] + for root, _, files in os.walk(root): + for f in files: + if suffix is not None and not f.endswith(suffix): + continue + res.append(os.path.join(root, f)) + print(files) + return res + + +def data_crop_resize(class_root, output_root, suffix, size=384): + all_data = find_all_files(class_root, suffix) + for data_root in all_data: + if data_root.endswith('.txt'): + continue + elif data_root.endswith('.DS_Store'): + continue + elif output_root is None: + new_data_root = (data_root + '_Lite').split('.')[0] + else: + data_name_without_suffix = os.path.split(data_root)[1].split('.')[0] + new_data_root = os.path.join(output_root, data_name_without_suffix) + + img = Image.open(data_root) + width, height = img.size # Get size + + # the cropping parameters are tuned for P_uninfected + s = min(width, height) + a = int((s * 5) / 7) + da = int(s / 14) + top = int(s / 7) + 2 * da + bottom = top + a - 2 * da + + left = int(s / 7) + 4 * da + right = left + a - 4 * da + + # Crop the center of the image + img = img.crop([left, top, right, bottom]) + resized_img = img.resize((int(size), int(size)), Image.ANTIALIAS) + save_file(resized_img, new_data_root) + + +if __name__ == '__main__': + args = get_args_parser() + args = args.parse_args() + + input_root = args.input_root + output_root = args.output_root + suffix = args.suffix + size = args.size + data_crop_resize(input_root, output_root, suffix, size) \ No newline at end of file diff --git a/PuzzleTuning/dataprocessing/CPIA-main/ROI/CPIA_ROI_1_Microscope_P_vivax.py b/PuzzleTuning/dataprocessing/CPIA-main/ROI/CPIA_ROI_1_Microscope_P_vivax.py new file mode 100644 index 0000000000000000000000000000000000000000..aa386793d6c060da2cfdaba01d3852dce756c193 --- /dev/null +++ b/PuzzleTuning/dataprocessing/CPIA-main/ROI/CPIA_ROI_1_Microscope_P_vivax.py @@ -0,0 +1,100 @@ +""" +CPIA_ROI_1_Microscope_P_vivax.py ver 23.6.8 +This code aims to crop each Microscope image by the largest center square, keeping the black surroundings out of the +final image. 
+Also, all the images will be resized into 384*384 (or other size you want). +""" +import argparse +import os +import PIL.Image as Image +from PIL import ImageFile +ImageFile.LOAD_TRUNCATED_IMAGES = True +Image.MAX_IMAGE_PIXELS = None + + +def get_args_parser(): + parser = argparse.ArgumentParser('CPIA dataset ROI part image cropping and resizing', add_help=False) + parser.add_argument('--input_root', default='..', type=str, + help='The root that contains the orginal images. Please make sure that there is no unwanted ' + 'images with corresponding suffix under the same root') + parser.add_argument('--output_root', default=None, type=str, + help='The root for the resized and cropped output images. If the root is not provided, this ' + 'program will automatically make an output path') + parser.add_argument('--suffix', default='jpg', type=str, + help='The suffix of the input image') + parser.add_argument('--size', default=384, type=int, + help='The size of the output image') + return argparse + + +def save_file(f_image, save_dir, suffix='.jpg'): + """ + Save images with designated suffix + """ + f_image = f_image.convert('RGB') + filepath, _ = os.path.split(save_dir) + if not os.path.exists(filepath): + os.makedirs(filepath) + f_image.save(save_dir + suffix) + + +def make_path(file_pack_path): + if not os.path.exists(file_pack_path): + os.makedirs(file_pack_path) + + +def find_all_files(root, suffix): + """ + Return a list of file paths ended with specific suffix + """ + res = [] + for root, _, files in os.walk(root): + for f in files: + if suffix is not None and not f.endswith(suffix): + continue + res.append(os.path.join(root, f)) + print(files) + return res + + +def data_crop_resize(class_root, output_root, suffix, size=384): + all_data = find_all_files(class_root, suffix) + for data_root in all_data: + if data_root.endswith('.txt'): + continue + elif data_root.endswith('.DS_Store'): + continue + elif output_root is None: + new_data_root = (data_root + '_Lite').split('.')[0] + else: + data_name_without_suffix = os.path.split(data_root)[1].split('.')[0] + new_data_root = os.path.join(output_root, data_name_without_suffix) + + img = Image.open(data_root) + width, height = img.size # Get size + + # the cropping parameters are tuned for P_vivax + s = min(width, height) + a = int((s * 5) / 7) + da = int(s / 14) + top = int(s / 7) + da + bottom = top + a - 2 * da + + left = int(s / 7) + 4 * da + right = left + a - 2 * da + + # Crop the center of the image + img = img.crop([left, top, right, bottom]) + resized_img = img.resize((int(size), int(size)), Image.ANTIALIAS) + save_file(resized_img, new_data_root) + + +if __name__ == '__main__': + args = get_args_parser() + args = args.parse_args() + + input_root = args.input_root + output_root = args.output_root + suffix = args.suffix + size = args.size + data_crop_resize(input_root, output_root, suffix, size) \ No newline at end of file diff --git a/PuzzleTuning/dataprocessing/CPIA-main/ROI/MicroscopeCrop.py b/PuzzleTuning/dataprocessing/CPIA-main/ROI/MicroscopeCrop.py new file mode 100644 index 0000000000000000000000000000000000000000..966faeaeaba5812ef8964816cf23c7bb927481ae --- /dev/null +++ b/PuzzleTuning/dataprocessing/CPIA-main/ROI/MicroscopeCrop.py @@ -0,0 +1,118 @@ +import os +import PIL.Image as Image +from PIL import ImageFile +ImageFile.LOAD_TRUNCATED_IMAGES = True +Image.MAX_IMAGE_PIXELS = None + + +def save_file(f_image, save_dir, suffix='.jpg'): + """ + Save images with designated suffix + """ + f_image = f_image.convert('RGB') + 
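+    # JPEG cannot store RGBA or palette images, so convert to RGB before saving
+    # (the default suffix used below is '.jpg')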
filepath, _ = os.path.split(save_dir) + if not os.path.exists(filepath): + os.makedirs(filepath) + f_image.save(save_dir + suffix) + + +def make_path(file_pack_path): + if not os.path.exists(file_pack_path): + os.makedirs(file_pack_path) + + +def find_all_files(root, suffix): + """ + Return a list of file paths ended with specific suffix + """ + res = [] + for root, _, files in os.walk(root): + for f in files: + if suffix is not None and not f.endswith(suffix): + continue + res.append(os.path.join(root, f)) + print(files) + return res + + +def center_crop(img_size): + """ + Return the cropping zone of a non-square image + :param img_size: img.size + :return: list that contains the cropping zone + """ + width, height = img_size # Get dimensions + a = min(width, height) + left = int((width - a) / 2) + top = int((height - a) / 2) + right = left + a + bottom = top + a + + return [left, top, right, bottom] + + +def data_crop_resize(class_root, output_root, suffix, size=384): + all_data = find_all_files(class_root, suffix) + for data_root in all_data: + if data_root.endswith('.txt'): + continue + elif data_root.endswith('.DS_Store'): + continue + elif output_root is None: + new_data_root = (data_root + '_Lite').split('.')[0] + # specially made for GS dataset: + """new_data_root = (data_root + '_Lite').replace('.', '_') + new_data_root = new_data_root.replace('_jpg', '')""" + else: + data_name_without_suffix = os.path.split(data_root)[1].split('.')[0] + new_data_root = os.path.join(output_root, data_name_without_suffix) + + + img = Image.open(data_root) + width, height = img.size # Get dimensions + s = min(width, height) + """left = int((width - a) / 2) + top = int((height - a) / 2) + right = left + a + bottom = top + a + this is DataBiox + """ + + """a = int((s * 5) / 7) + da = int(s / 14) + top = int(s / 7) + da + bottom = top + a - 2 * da + + left = int(s / 7) + 4*da + right = left + a - 2 * da + this is for P.vivax""" + + """a = int((s * 5) / 7) + da = int(s / 14) + top = int(s / 7) + 3* da + bottom = top + a - 4 * da + + left = int(s / 7) + 4 * da + right = left + a - 2 * da + this is for P_falciparum""" + + a = int((s * 5) / 7) + da = int(s / 14) + top = int(s / 7) + 2 * da + bottom = top + a - 2 * da + + left = int(s / 7) + 4 * da + right = left + a - 4 * da + + + # Crop the center of the image + img = img.crop([left, top, right, bottom]) + resized_img = img.resize((int(size), int(size)), Image.ANTIALIAS) + save_file(resized_img, new_data_root) + + +if __name__ == '__main__': + data_crop_resize(r'F:\Puzzle Tuning Datasets\P.uninfected(NIH-NLM-ThickBloodSmearsU)\NIH-NLM-ThickBloodSmearsU\Uninfected Patients', + r'D:\CPIA_VersionJournal\CPIA_MJ\S\P_uninfected', + 'tiff', + 384) \ No newline at end of file diff --git a/PuzzleTuning/dataprocessing/CPIA-main/WSI/CPIA_WSI.py b/PuzzleTuning/dataprocessing/CPIA-main/WSI/CPIA_WSI.py new file mode 100644 index 0000000000000000000000000000000000000000..861951f5951a11e392bf2e2920d52b3560236e39 --- /dev/null +++ b/PuzzleTuning/dataprocessing/CPIA-main/WSI/CPIA_WSI.py @@ -0,0 +1,500 @@ +""" +CPIA_WSI.py ver: 23 Nov 2 +This code aims to split each whole slide image into standardised patches. 
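+A typical invocation (these are just the argparse defaults declared at the bottom of this file;
+point --input and --output at your own data roots):
+    python CPIA_WSI.py --input /data/WSI_1 --output /data-save --suffix svs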
+The patch sizes are: 3840, 960, 384, 96 +""" +import os +import PIL.Image as Image +import numpy as np +import openslide +import torch +from PIL import ImageFile +import pandas as pd +import argparse +from multiprocessing import Pool, cpu_count +from tqdm import tqdm +ImageFile.LOAD_TRUNCATED_IMAGES = True +Image.MAX_IMAGE_PIXELS = None + +STANDARD_MPP = 0.4942 +patch_size = [(3840, 3840), (960, 960), (384, 384), (96, 96)] + + +def run_map_mp(func, argument_list, num_processes='', is_tqdm=True): + """Multi-threading with progress bar + Ref: https://zhuanlan.zhihu.com/p/359369130 + + Args: + func (func): Target function. + argument_list (list): Argument list. Example format: [(a1, b1), (a2, b2)] + num_processes (str, optional): The number of processes. Defaults to the number of threads - 3. + is_tqdm (bool, optional): Whether to display progress bar (using tqdm). Defaults to True. + + Returns: + result_list_tqdm: Output list of each thread. + """ + result_list_tqdm = [] + try: + if not num_processes: + num_processes = min(cpu_count() - 3, len(argument_list)) + pool = Pool(processes=num_processes) + print('start running multiprocess using {} threads'.format(num_processes)) + + # Use pool.starmap to allow multi-parameter for func + if is_tqdm: + + # Here because starmap can only return result after fully finished, it is not capable to + # operate with tqdm progress bar. + + # In this case, I update the progress bar every num_processes processes, which may slow down + # the process a bit but enable observation. + + pbar = tqdm(total=len(argument_list)) + idx = 0 + for idx in range(0, len(argument_list) // num_processes + 1): + for result in pool.starmap(func=func, iterable=argument_list[idx*num_processes : min((idx+1)*num_processes, len(argument_list))]): + result_list_tqdm.append(result) + pbar.update(min(num_processes, len(argument_list)-idx*num_processes)) + idx += 1 + else: + for result in pool.starmap(func=func, iterable=argument_list): + result_list_tqdm.append(result) + pool.close() + pool.join() + + except: + result_list_tqdm = list(map(func, argument_list)) + return result_list_tqdm + + +def save_file(f_image, save_dir, suffix='.jpg'): + """ + Save images with designated suffix + """ + filepath, _ = os.path.split(save_dir) + if not os.path.exists(filepath): + os.makedirs(filepath) + f_image.save(save_dir + suffix) + + +def make_path(file_pack_path): + if not os.path.exists(file_pack_path): + os.makedirs(file_pack_path) + + +def find_all_files(root, suffix=None): + """ + Return a list of file paths ended with specific suffix + """ + res = [] + if type(suffix) is tuple or type(suffix) is list: + for root, _, files in os.walk(root): + for f in files: + if suffix is not None: + status = 0 + for i in suffix: + if not f.endswith(i): + pass + else: + status = 1 + break + if status == 0: + continue + res.append(os.path.join(root, f)) + return res + + elif type(suffix) is str or suffix is None: + for root, _, files in os.walk(root): + for f in files: + if suffix is not None and not f.endswith(suffix): + continue + res.append(os.path.join(root, f)) + return res + + else: + print('type of suffix is not legal :', type(suffix)) + return -1 + + +def convert_to_npy_no_opening(patch, patch_size=(960, 960)): + """ + Convert the image into numpy format; + The numpy size is slightly cropped + :param patch: the patch to be converted + :param patch_size: the required image size + :return: + """ + patch_size = to_2tuple(patch_size) + img = patch + w, h = img.size + factor = min(w // patch_size[0], h // 
patch_size[1]) + numpy_img = img.crop([0, 0, factor * patch_size[0], factor * patch_size[1]]) + numpy_img = np.array(numpy_img) + + return numpy_img + + +class to_patch: + """ + Split an image into patches, each patch with the size of patch_size + """ + def __init__(self, patch_size=(16, 16)): + patch_size = to_2tuple(patch_size) + self.patch_h = patch_size[0] + self.patch_w = patch_size[1] + + def __call__(self, x): + x = torch.tensor(x) + x = x.permute(2, 0, 1) + c, h, w = x.shape + # print(x.shape) + # assert h // self.patch_h == h / self.patch_h and w // self.patch_w == w / self.patch_w + num_patches = (h // self.patch_h) * (w // self.patch_w) + + h_1 = (h // self.patch_h) * self.patch_h + w_1 = (w // self.patch_w) * self.patch_w + x = x[:, ((h - h_1) // 2):((h - h_1) // 2 + h_1), ((w - w_1) // 2):((w - w_1) // 2 + w_1)] + # patch encoding + # (c, h, w) + # -> (c, h // self.patch_h, self.patch_h, w // self.patch_w, self.patch_w) + # -> (h // self.patch_h, w // self.patch_w, self.patch_h, self.patch_w, c) + # -> (n_patches, patch_size^2*c) + patches = x.view( + c, + h // self.patch_h, + self.patch_h, + w // self.patch_w, + self.patch_w).permute(1, 3, 2, 4, 0).reshape(num_patches, -1) # it can also used in transformer Encoding + + # patch split + # (n_patches, patch_size^2*c) + # -> (num_patches, self.patch_h, self.patch_w, c) + # -> (num_patches, c, self.patch_h, self.patch_w) + patches = patches.view(num_patches, + self.patch_h, + self.patch_w, + c).permute(0, 3, 1, 2) + + return patches + + +def to_2tuple(input): + if type(input) is tuple: + if len(input) == 2: + return input + else: + if len(input) > 2: + output = (input[0], input[1]) + return output + elif len(input) == 1: + output = (input[0], input[0]) + return output + else: + print('cannot handle none tuple') + else: + if type(input) is list: + if len(input) == 2: + output = (input[0], input[1]) + return output + else: + if len(input) > 2: + output = (input[0], input[1]) + return output + elif len(input) == 1: + output = (input[0], input[0]) + return output + else: + print('cannot handle none list') + elif type(input) is int: + output = (input, input) + return output + else: + print('cannot handle ', type(input)) + raise ('cannot handle ', type(input)) + + +def pick_patch(patch): + """ + Pick the image patch that includes tissue information + The patch with relatively more R is to be picked + :param patch: input with numpy format + :return: bool + """ + patch = array2img(patch) + img_single = patch.resize((1, 1), Image.BILINEAR) + r, g, b = img_single.getpixel((0, 0)) + if r > 220 and g > 220 and b > 220: + return False + else: + return True + + +def array2img(patch): + img = Image.fromarray(patch.astype('uint8')).convert('RGB') + return img + + +def make_name(former_name, patch_size, patch_num): + """ + Important: each image patch's name include x, y, patch_size; + The exact location of an image patch is (x * patch_size, y * patch_size) + """ + former_patch_size = int(former_name.split('-')[-3]) + former_x = int(former_name.split('-')[-2]) + former_y = int(former_name.split('-')[-1]) + img_real_name = former_name[::-1].split('-', 3)[-1][::-1] + + ratio = int(former_patch_size / patch_size) + x = patch_num % ratio if patch_num % ratio != 0 else ratio + x = x - 1 # every coordinate starts with 0 + x = former_x * ratio + x + + y = patch_num // ratio if patch_num % ratio != 0 else patch_num // ratio - 1 + y = former_y * ratio + y + + img_name = img_real_name + '-' + str(patch_size) + '-' + str(x) + '-' + str(y) + print(img_name) + 
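+    # Worked example (hypothetical name): former_name = 'slideA-3840-2-5' split into 960-px
+    # patches gives ratio = 4; patch_num = 6 is the local grid cell (x, y) = (1, 1), so the
+    # global coordinate is (2 * 4 + 1, 5 * 4 + 1) = (9, 21) and the result is 'slideA-960-9-21'.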
return img_name + + +def SVS_cut_to_patch(img, save_root, patch_size, + class_name, + name_dir_3840, name_dir_0, name_dir_1, name_dir_2, + patient_folder=False, + mpp=None, + L=True, M=True, S=True): + slide = openslide.open_slide(img) + img_name = os.path.split(img)[1].split('.')[0] + if mpp is None: + MPP = slide.properties[openslide.PROPERTY_NAME_MPP_X] + print(MPP, img) + else: + MPP = mpp + resize_ratio = STANDARD_MPP / float(MPP) + print(resize_ratio) + if 1.1 > resize_ratio > 0.9: + patch_size_num_0 = patch_size[0][0] + else: + patch_size_num_0 = int(patch_size[0][0] * resize_ratio) + print(patch_size_num_0) + save_root_0 = os.path.join(os.path.join(save_root, str(patch_size[0][0])), class_name + '-' + str(patch_size[0][0])) + make_path(save_root_0) + w, h = slide.level_dimensions[0] + for i in range(1, w // patch_size_num_0 - 1): + for j in range(1, h // patch_size_num_0 - 1): + + patch = slide.read_region((i * patch_size_num_0, j * patch_size_num_0), 0, + (patch_size_num_0, patch_size_num_0)) + patch = patch.convert('RGB') + if not 1.1 > resize_ratio > 0.9: + patch = patch.resize(patch_size[0], Image.ANTIALIAS) + # resize to 3840 * 3840 + img_single = patch.resize((1, 1), Image.ANTIALIAS) + r, g, b = img_single.getpixel((0, 0)) + if r < 220 and g < 220 and b < 220 and r > 100 and b > 30 and r > g + 20: + save_file(patch, os.path.join(save_root_0, + img_name + '-' + str(patch_size[0][0]) + '-' + str(i) + '-' + str(j))) + print(os.path.join(save_root_0, img_name + '-' + str(patch_size[0][0]) + '-' + str(i) + '-' + str(j))) + name_dir_3840[os.path.join(save_root_0, + img_name + '-' + str(patch_size[0][0]) + '-' + str(i) + '-' + str( + j)) + '-' + str(resize_ratio)] = img + if patient_folder is True: + save_root_patient_0 = os.path.join(save_root_0 + '-patient', img_name) + save_file(patch, os.path.join(save_root_patient_0, + img_name + '-' + str(patch_size[0][0]) + '-' + str(i) + '-' + str(j))) + Image_name_XL = img_name + '-' + str(patch_size[0][0]) + '-' + str(i) + '-' + str(j) + cut_to_patch(patch, Image_name_XL, save_root, + patch_size[1], patch_size[2], patch_size[3], + img_name, class_name, + name_dir_0, name_dir_1, name_dir_2, + patient_folder=patient_folder, + L=L, M=M, S=S) + else: + continue + pd.DataFrame.from_dict(name_dir_3840, orient='index', columns=['origin path']).to_csv( + os.path.join(os.path.join(save_root, str(patch_size[0][0])), class_name + '-' + str(patch_size[0][0]) + '.csv') + ) + + +def cut_to_patch(patch, + current_img_name, + save_root, + patch_size_0, patch_size_1, patch_size_2, + img_name, class_name, + name_dir_0, name_dir_1, name_dir_2, + patient_folder=True, + L=True, M=True, S=False + ): + current_img_name = current_img_name + numpy_img = convert_to_npy_no_opening(patch) + patch_size_num_0 = patch_size_0[0] + patch_size_num_1 = patch_size_1[0] + patch_size_num_2 = patch_size_2[0] + save_root_0 = os.path.join(os.path.join(save_root, str(patch_size_num_0)), class_name + '-' + str(patch_size_num_0)) + save_root_1 = os.path.join(os.path.join(save_root, str(patch_size_num_1)), class_name + '-' + str(patch_size_num_1)) + save_root_2 = os.path.join(os.path.join(save_root, str(patch_size_num_2)), class_name + '-' + str(patch_size_num_2)) + + save_root_patient_0 = os.path.join(save_root_0 + '-patient', img_name) + save_root_patient_1 = os.path.join(save_root_1 + '-patient', img_name) + save_root_patient_2 = os.path.join(save_root_2 + '-patient', img_name) + + img_split_0 = to_patch(patch_size_0) + img_patches_0 = img_split_0(numpy_img) + + img_split_1 = 
to_patch(patch_size_1) + img_patches_1 = img_split_1(numpy_img) + i = 0 + j = 0 + if L: + # on most cases we need L-scale, which is 960 * 960 + for patch in img_patches_0: + i = i + 1 + patch = patch.permute(1, 2, 0) + patch = patch.numpy() + if pick_patch(patch): + img_name_0 = make_name(current_img_name, patch_size_num_0, i) + save_dir_0 = os.path.join(save_root_0, img_name_0) + print(save_dir_0) + patch = array2img(patch) + # patch = patch.resize((384, 384), Image.ANTIALIAS) # 归为384*384 + # for our biggest CPIA we dont want to resize + if patient_folder: + save_file(patch, os.path.join(save_root_patient_0, img_name_0)) + name_dir_0[save_dir_0] = '1' + save_file(patch, save_dir_0) + else: + pass + if M: + # on most cases we need M-scale, which is 384 * 384 + # if M is false then S must be false + for patch_1 in img_patches_1: + # convert the image into numpy + j = j + 1 + patch_1 = patch_1.permute(1, 2, 0) + patch_1 = patch_1.numpy() + if pick_patch(patch_1): + # save 384*384 image + img_name_1 = make_name(current_img_name, patch_size_num_1, j) + save_dir_1 = os.path.join(save_root_1, img_name_1) + print(save_dir_1) + if S: + k = 0 + img_split_2 = to_patch(patch_size_2) + img_patches_2 = img_split_2(patch_1) + for patch_2 in img_patches_2: + k = k + 1 + patch_2 = patch_2.permute(1, 2, 0) + patch_2 = patch_2.numpy() + if pick_patch(patch_2): + if k % 10 == 0: + # down sampling + img_name_2 = make_name(img_name_1, patch_size_num_2, k) + patch_2 = array2img(patch_2) + save_dir_2 = os.path.join(save_root_2, img_name_2) + print(save_dir_2) + if patient_folder: + save_file(patch_2, os.path.join(save_root_patient_2, img_name_2)) + name_dir_2[save_dir_2] = '1' + save_file(patch_2, save_dir_2) + else: + pass + + patch_1 = array2img(patch_1) + if patient_folder: + save_file(patch_1, os.path.join(save_root_patient_1, img_name_1)) + name_dir_1[save_dir_1] = '1' + save_file(patch_1, save_dir_1) + else: + pass + pd.DataFrame.from_dict(name_dir_0, orient='index', columns=['origin path']).to_csv( + os.path.join(os.path.join(save_root, + str(patch_size_num_0)), class_name + '-' + str(patch_size_num_0) + '.csv') + ) + pd.DataFrame.from_dict(name_dir_1, orient='index', columns=['origin path']).to_csv( + os.path.join(os.path.join(save_root, + str(patch_size_num_1)), class_name + '-' + str(patch_size_num_1) + '.csv') + ) + pd.DataFrame.from_dict(name_dir_2, orient='index', columns=['origin path']).to_csv( + os.path.join(os.path.join(save_root, + str(patch_size_num_2)), class_name + '-' + str(patch_size_num_2) + '.csv') + ) + + +def read_and_convert(data_root, save_root, suffix, patient_folder, mpp, resume, resume_dataset, resume_WSI, L, M, S): + + class_names = os.listdir(data_root) + print(class_names) + + for class_name in class_names: + svs_class_root = os.path.join(data_root, class_name) + svs_all_files = find_all_files(svs_class_root, suffix) + + name_dir_3840 = {} + name_dir_0 = {} + name_dir_1 = {} + name_dir_2 = {} + arg_list = [] + + for img in svs_all_files: + arg_list.append([ + img, + save_root, + patch_size, + class_name, + name_dir_3840, + name_dir_0, + name_dir_1, + name_dir_2, + patient_folder, + mpp, + L, + M, + S]) + # Run code in parallel. One process for each WSI. 
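+        # Note: Pool workers receive pickled copies of the name_dir_* dicts, so entries added
+        # inside SVS_cut_to_patch never propagate back to this process; each worker call
+        # writes the class CSV from its own local copy instead.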
+ run_map_mp(SVS_cut_to_patch, arg_list, is_tqdm=True) + + + + +def get_args_parser(): + parser = argparse.ArgumentParser('CPIA_WSI', add_help=False) + + parser.add_argument('--input', default='/data/WSI_1', type=str, + help='path to input dataset') + parser.add_argument('--output', default='/data-save', type=str, + help='path to output patches') + parser.add_argument('--suffix', default='svs', type=str, + help='input image suffix') + parser.add_argument('--patient', default=False, type=bool, + help='whether to generate patient folder') + parser.add_argument('--mpp', default=None, type=str, + help='for some datasets the MPP need to be provided manually') + parser.add_argument('--resume', default=False, type=bool, + help='resume the process') + parser.add_argument('--resume_dataset', default='CPTAC-BRCA', type=str, + help='path to output patches') + parser.add_argument('--resume_WSI', default='/data/WSI_1/CPTAC-BRCA/BRCA/20BR008-76d73924-1d5a-4d75-b776-1f1b2b.svs', + type=str, + help='the actual path of the WSI to be resumed') + + + + return parser + +if __name__ == '__main__': + parser = argparse.ArgumentParser('CPIA_WSI', parents=[get_args_parser()]) + args = parser.parse_args() + read_and_convert(args.input, + args.output, + args.suffix, + args.patient, + args.mpp, + args.resume, args.resume_dataset, args.resume_WSI, + L=True, M=True, S=True) + + + + + + + diff --git a/PuzzleTuning/dataprocessing/Fraction_sample_data.py b/PuzzleTuning/dataprocessing/Fraction_sample_data.py new file mode 100644 index 0000000000000000000000000000000000000000..1bd3a0a5887d68e0131eaed714f480087e63e24c --- /dev/null +++ b/PuzzleTuning/dataprocessing/Fraction_sample_data.py @@ -0,0 +1,136 @@ +""" +script ver: Aug 19th 17:40 +将MIL格式数据集的train抽取一定量部分并命名为AAAA_fraction_XX XX为抽取百分比 +""" +import os +import random +import shutil +import argparse +from multiprocessing import Pool, cpu_count + + +def setup_seed(seed): # setting up the random seed + import numpy as np + np.random.seed(seed) + random.seed(seed) + + +def make_and_clear_path(file_pack_path): + if not os.path.exists(file_pack_path): + os.makedirs(file_pack_path) + + +def sampling(file_dir, target_dir, rate, split_subset_range='ALL', CLS=False): + """ + file_dir: input dataset path + target_dir: output dataset path + rate: fraction rate + split_subset_range:'train' to sample the training only; 'ALL' to sample the training, validation and test sets + CLS: type of dataset format, True for imagefolder, False for mask+imagefolder format + """ + print('Dataset at', file_dir) + split_names = os.listdir(file_dir) + for split_name in split_names: + + if split_subset_range == 'ALL': + file_dir_train = os.path.join(file_dir, split_name) + file_dir_data = os.path.join(file_dir_train, 'data') + file_dir_mask = os.path.join(file_dir_train, 'mask') + target_dir_train = os.path.join(target_dir, split_name) + target_dir_data = os.path.join(target_dir_train, 'data') + target_dir_mask = os.path.join(target_dir_train, 'mask') + + for type in os.listdir(file_dir_data): + + make_and_clear_path(os.path.join(target_dir_data, type)) + if not CLS: + make_and_clear_path(os.path.join(target_dir_mask, type)) + path_dir = os.listdir(os.path.join(file_dir_data, type)) # 取图片的原始路径 + file_number = len(path_dir) + rate1 = rate # 自定义抽取的比例(百分制) + pick_number = int(file_number * rate1 / 100) # 按照rate比例从文件夹中取一定数量的文件 + sample1 = random.sample(path_dir, pick_number) + for name in sample1: + shutil.copyfile(os.path.join(os.path.join(file_dir_data, type), name), + 
os.path.join(os.path.join(target_dir_data, type), name))
+                    if not CLS:
+                        shutil.copyfile(os.path.join(os.path.join(file_dir_mask, type), name),
+                                        os.path.join(os.path.join(target_dir_mask, type), name))
+
+        elif split_subset_range == 'train':
+            if split_name == 'train':
+                file_dir_train = os.path.join(file_dir, split_name)
+                file_dir_data = os.path.join(file_dir_train, 'data')
+                file_dir_mask = os.path.join(file_dir_train, 'mask')
+                target_dir_train = os.path.join(target_dir, split_name)
+                target_dir_data = os.path.join(target_dir_train, 'data')
+                target_dir_mask = os.path.join(target_dir_train, 'mask')
+
+                for type in os.listdir(file_dir_data):
+
+                    make_and_clear_path(os.path.join(target_dir_data, type))
+                    if not CLS:
+                        make_and_clear_path(os.path.join(target_dir_mask, type))
+                    path_dir = os.listdir(os.path.join(file_dir_data, type))  # original image paths of this class
+                    file_number = len(path_dir)
+                    rate1 = rate  # sampling percentage (out of 100)
+                    pick_number = int(file_number * rate1 / 100)  # number of files to pick at the given rate
+                    sample1 = random.sample(path_dir, pick_number)
+                    for name in sample1:
+                        shutil.copyfile(os.path.join(os.path.join(file_dir_data, type), name),
+                                        os.path.join(os.path.join(target_dir_data, type), name))
+                        if not CLS:
+                            shutil.copyfile(os.path.join(os.path.join(file_dir_mask, type), name),
+                                            os.path.join(os.path.join(target_dir_mask, type), name))
+            else:
+                shutil.copytree(os.path.join(file_dir, split_name), os.path.join(target_dir, split_name))
+        else:
+            print('not a valid split_subset_range:', split_subset_range)
+            raise ValueError('split_subset_range must be "ALL" or "train"')
+
+        print(split_name, 'has been processed')
+
+    return
+
+
+def main(args):
+    '''
+    class_dir = '/Users/munros/Desktop/ROSE_MIL'
+    output = r'/Users/munros/Desktop/ROSE/MIL'
+    rates = [10, 20, 30, 40, 50, 60, 70, 80, 90]
+    for rate in rates:
+
+        file_dir = class_dir
+        target_dir = os.path.join(output, 'Rose_fraction_' + str(int(rate/10)) + '_MIL')
+
+        sampling(file_dir, target_dir, rate, split_subset_range='train', CLS=False)
+    '''
+    Dataset_name = os.path.split(args.root)[-1].split('_')[0]
+    target_dir = os.path.join(args.save_root, Dataset_name + '_fraction_' + str(int(args.rate / 10)) + '_MIL')
+
+    sampling(args.root, target_dir, args.rate, split_subset_range=args.split_subset_range, CLS=args.CLS)
+
+
+def get_args_parser():
+    parser = argparse.ArgumentParser(description='data_sampling')
+    parser.add_argument('--root', default='/root/autodl-tmp/datasets/ROSE_MIL', type=str,
+                        help='the input dataset root, not including the final list')
+    parser.add_argument('--save_root', default='/root/autodl-tmp/datasets', type=str,
+                        help='the output root where the sampled dataset will be saved')
+    parser.add_argument('--rate', default=10, type=int,
+                        help='the sampling percentage (0-100)')
+    parser.add_argument('--split_subset_range', default='train', type=str,
+                        help='the subset which will be sampled: ALL or train')
+    parser.add_argument('--CLS', action='store_true',
+                        help='pass --CLS for imagefolder (CLS) format; omit it for mask+imagefolder (MIL) format')
+
+    return parser
+
+
+if __name__ == '__main__':
+    # setting up the random seed
+    setup_seed(42)
+
+    parser = get_args_parser()
+    args = parser.parse_args()
+    main(args)
diff --git a/PuzzleTuning/dataprocessing/WSI_whole_cropping.py b/PuzzleTuning/dataprocessing/WSI_whole_cropping.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d087a6e8248c825ef97943baa9d57332c7fd5be
--- /dev/null
+++ b/PuzzleTuning/dataprocessing/WSI_whole_cropping.py
@@ -0,0 +1,434 @@
+"""
+'JPG_cropping_960...' 
ver: 22 Nov 10 +Crop pathology images into patches Using average filtering to screen the useful pieces which are mostly red/purple + +Specially mod ver +maximize the efficient of cropping in different size +""" +import os + +os.add_dll_directory(r"D:\chrome_download\github220901\openslide-win64\bin") +# 注意openslide的使用需要这样 另外叫将openslide添加到PATh里面 +import openslide +import shutil +import PIL.Image as Image +import numpy as np +import openslide +import torch +from tqdm import tqdm +import cv2 +from torchvision import transforms +from PIL import ImageFile +import pandas as pd + +ImageFile.LOAD_TRUNCATED_IMAGES = True +Image.MAX_IMAGE_PIXELS = None + +STANDARD_MPP = 0.4942 +patch_size = [(3840, 3840), (960, 960), (384, 384), (96, 96)] + + +def save_file(f_image, save_dir, suffix='.jpg'): + """ + 重命名并保存图片,生成重命名的表 + """ + filepath, _ = os.path.split(save_dir) + if not os.path.exists(filepath): + os.makedirs(filepath) + # f_image.save(save_dir + suffix) + image_data = np.asarray(f_image) + cv2.imwrite(save_dir+suffix, image_data) + + +def make_and_clear_path(file_pack_path): + if not os.path.exists(file_pack_path): + os.makedirs(file_pack_path) + + +def find_all_files(root, suffix=None): + """ + Return a list of file paths ended with specific suffix + """ + res = [] + if type(suffix) is tuple or type(suffix) is list: + for root, _, files in os.walk(root): + for f in files: + if suffix is not None: + status = 0 + for i in suffix: + if not f.endswith(i): + pass + else: + status = 1 + break + if status == 0: + continue + res.append(os.path.join(root, f)) + return res + + elif type(suffix) is str or suffix is None: + for root, _, files in os.walk(root): + for f in files: + if suffix is not None and not f.endswith(suffix): + continue + res.append(os.path.join(root, f)) + return res + + else: + print('type of suffix is not legal :', type(suffix)) + return -1 + + +def convert_to_npy(a_data_path, patch_size=(960, 960)): + patch_size = to_2tuple(patch_size) + + # 处理转换 + + # 传回npy + img = Image.open(a_data_path) + w, h = img.size + factor = min(w // patch_size[0], h // patch_size[1]) + numpy_img = img.crop([0, 0, factor * patch_size[0], factor * patch_size[1]]) + numpy_img = np.array(numpy_img) + + return numpy_img + + +class to_patch: + """ + Split an image into patches, each patch with the size of patch_size + """ + + def __init__(self, patch_size=(16, 16)): + patch_size = to_2tuple(patch_size) + self.patch_h = patch_size[0] + self.patch_w = patch_size[1] + + def __call__(self, x): + x = torch.tensor(x) + x = x.permute(2, 0, 1) + c, h, w = x.shape + # print(x.shape) + # assert h // self.patch_h == h / self.patch_h and w // self.patch_w == w / self.patch_w + num_patches = (h // self.patch_h) * (w // self.patch_w) + + h_1 = (h // self.patch_h) * self.patch_h + w_1 = (w // self.patch_w) * self.patch_w + x = x[:, ((h - h_1) // 2):((h - h_1) // 2 + h_1), ((w - w_1) // 2):((w - w_1) // 2 + w_1)] + # patch encoding + # (c, h, w) + # -> (c, h // self.patch_h, self.patch_h, w // self.patch_w, self.patch_w) + # -> (h // self.patch_h, w // self.patch_w, self.patch_h, self.patch_w, c) + # -> (n_patches, patch_size^2*c) + patches = x.view( + c, + h // self.patch_h, + self.patch_h, + w // self.patch_w, + self.patch_w).permute(1, 3, 2, 4, 0).reshape(num_patches, -1) # it can also used in transformer Encoding + + # patch split + # (n_patches, patch_size^2*c) + # -> (num_patches, self.patch_h, self.patch_w, c) + # -> (num_patches, c, self.patch_h, self.patch_w) + patches = patches.view(num_patches, + self.patch_h, + 
self.patch_w, + c).permute(0, 3, 1, 2) + + return patches + + +def to_2tuple(input): + if type(input) is tuple: + if len(input) == 2: + return input + else: + if len(input) > 2: + output = (input[0], input[1]) + return output + elif len(input) == 1: + output = (input[0], input[0]) + return output + else: + print('cannot handle none tuple') + else: + if type(input) is list: + if len(input) == 2: + output = (input[0], input[1]) + return output + else: + if len(input) > 2: + output = (input[0], input[1]) + return output + elif len(input) == 1: + output = (input[0], input[0]) + return output + else: + print('cannot handle none list') + elif type(input) is int: + output = (input, input) + return output + else: + print('cannot handle ', type(input)) + raise ('cannot handle ', type(input)) + + +def pick_patch(patch): + """ + 用于选择合适颜色的图片 + :param patch: + :return: + """ + patch = array2img(patch) + img_single = patch.resize((1, 1), Image.ANTIALIAS) + r, g, b = img_single.getpixel((0, 0)) + if r - g < 30: + return False + else: + return True + + +def array2img(patch): + img = Image.fromarray(patch.astype('uint8')).convert('RGB') + return img + + +def make_name(former_name, patch_size, patch_num): + """ + 确保每个名字 都反映原图上的横向x,纵向y,步长为自身patch_size + :param former_name: + :param patch_size: + :return: + """ + former_patch_size = int(former_name.split('-')[-3]) + former_x = int(former_name.split('-')[-2]) + former_y = int(former_name.split('-')[-1]) + img_real_name = former_name[::-1].split('-', 3)[-1][::-1] + + ratio = int(former_patch_size / patch_size) + x = patch_num % ratio if patch_num % ratio != 0 else ratio + x = x - 1 # every coordinate starts with 0 + x = former_x * ratio + x + + y = patch_num // ratio if patch_num % ratio != 0 else patch_num // ratio - 1 + y = former_y * ratio + y + + img_name = img_real_name + '-' + str(patch_size) + '-' + str(x) + '-' + str(y) + print(img_name) + return img_name + + +def SVS_cut_to_patch(img, save_root, + patch_size, + img_name, + class_name, + name_dir_3840, name_dir_0, name_dir_1, name_dir_2, + patient_folder=False, + L=True, M=True, S=False): + slide = openslide.open_slide(img) + try: + MPP = slide.properties[openslide.PROPERTY_NAME_MPP_X] + print(MPP, img) + resize_ratio = STANDARD_MPP/float(MPP) + print(resize_ratio) + if 1.1 > resize_ratio > 0.9: + patch_size_num_0 = patch_size[0][0] + else: + patch_size_num_0 = int(patch_size[0][0] * resize_ratio) + print(patch_size_num_0) + save_root_0 = os.path.join(os.path.join(save_root, str(patch_size[0][0])), class_name + '-' + str(patch_size[0][0])) + make_and_clear_path(save_root_0) + w, h = slide.level_dimensions[0] + for i in range(1, w // patch_size_num_0 - 1): + + for j in range(1, h // patch_size_num_0 - 1): + + patch = slide.read_region((i * patch_size_num_0, j * patch_size_num_0), 0, (patch_size_num_0, patch_size_num_0)) + patch = patch.convert('RGB') + # print('finish id:%d image' % image_list.index(id)) + if not 1.1 > resize_ratio > 0.9: + patch = patch.resize(patch_size[0], Image.ANTIALIAS) # resize 到 3840 3840 + # 统一归为384*384 + # save_file(patch, os.path.join(save_root_0, img_name + '-' + str((i + 1) * (j + 1)))) + img_single = patch.resize((1, 1), Image.ANTIALIAS) + r, g, b = img_single.getpixel((0, 0)) + if r < 220 and g < 220 and b < 220 and r > 100 and b > 30 and r > g + 20: + + save_file(patch, os.path.join(save_root_0, img_name + '-' + str(patch_size[0][0]) + '-' + str(i) + '-' + str(j))) + name_dir_3840[os.path.join(save_root_0, img_name + '-' + str(patch_size[0][0]) + '-' + str(i) + '-' + 
str(j)) + '-' + str(resize_ratio)] = img + if patient_folder is True: + save_root_patient_0 = os.path.join(save_root_0 + '-patient', img_name) + save_file(patch, os.path.join(save_root_patient_0, img_name + '-' + str(patch_size[0][0]) + '-' + str(i) + '-' + str(j))) + current_img = os.path.join(save_root_0, img_name + '-' + str(patch_size[0][0]) + '-' + str(i) + '-' + str(j)) + '.jpg' + + cut_to_patch(current_img, save_root, + patch_size[1], patch_size[2], patch_size[3], + img_name, class_name, + name_dir_0, name_dir_1, name_dir_2, + patient_folder=patient_folder, + L=L, M=M, S=S) + else: + continue + # save_file(patch, os.path.join('H:\PuzzleTuning\SNL-Breast-Back', img_name + '-' + str(i) + '-' +c + # str(j))) + pd.DataFrame.from_dict(name_dir_3840, orient='index', columns=['origin path']).to_csv( + os.path.join(os.path.join(save_root, str(patch_size[0][0])), class_name + '-' + str(patch_size[0][0]) + '.csv') + ) + + except Exception as e: + print(e) + + +def cut_to_patch(img, + save_root, + patch_size_0, patch_size_1, patch_size_2, + img_name, class_name, + name_dir_0, name_dir_1, name_dir_2, + patient_folder=True, + L=True, M=True, S=False + ): + current_img_name = os.path.split(img)[1].split('.')[0] + numpy_img = convert_to_npy(img) + patch_size_num_0 = patch_size_0[0] + patch_size_num_1 = patch_size_1[0] + patch_size_num_2 = patch_size_2[0] + save_root_0 = os.path.join(os.path.join(save_root, str(patch_size_num_0)), class_name + '-' + str(patch_size_num_0)) + save_root_1 = os.path.join(os.path.join(save_root, str(patch_size_num_1)), class_name + '-' + str(patch_size_num_1)) + save_root_2 = os.path.join(os.path.join(save_root, str(patch_size_num_2)), class_name + '-' + str(patch_size_num_2)) + + save_root_patient_0 = os.path.join(save_root_0 + '-patient', img_name) + save_root_patient_1 = os.path.join(save_root_1 + '-patient', img_name) + save_root_patient_2 = os.path.join(save_root_2 + '-patient', img_name) + + img_split_0 = to_patch(patch_size_0) + img_patches_0 = img_split_0(numpy_img) + + img_split_1 = to_patch(patch_size_1) + img_patches_1 = img_split_1(numpy_img) + i = 0 + j = 0 + if L: + # on most cases we need L-scale, which is 960 * 960 + for patch in img_patches_0: + i = i + 1 + patch = patch.permute(1, 2, 0) + patch = patch.numpy() + if pick_patch(patch): + img_name_0 = make_name(current_img_name, patch_size_num_0, i) + save_dir_0 = os.path.join(save_root_0, img_name_0) + print(save_dir_0) + patch = array2img(patch) + # patch = patch.resize((384, 384), Image.ANTIALIAS) # 归为384*384 + # for our biggest CPIA we dont want to resize + if patient_folder: + save_file(patch, os.path.join(save_root_patient_0, img_name_0)) + name_dir_0[save_dir_0] = img + # 保存相关.csv + save_file(patch, save_dir_0) + else: + pass + if M: + # on most cases we need M-scale, which is 384 * 384 + # if M is false then S must be false + for patch_1 in img_patches_1: + # convert the image into numpy + j = j + 1 + patch_1 = patch_1.permute(1, 2, 0) + patch_1 = patch_1.numpy() + if pick_patch(patch_1): + # save 384*384 image + img_name_1 = make_name(current_img_name, patch_size_num_1, j) + save_dir_1 = os.path.join(save_root_1, img_name_1) + print(save_dir_1) + if S: + # 2023.5.12 暂时不处理S + k = 0 + img_split_2 = to_patch(patch_size_2) + img_patches_2 = img_split_2(patch_1) + for patch_2 in img_patches_2: + k = k + 1 + patch_2 = patch_2.permute(1, 2, 0) + patch_2 = patch_2.numpy() + if pick_patch(patch_2): + # if k % 10 == 0: + # for our biggest CPIA we don't want sampling + img_name_2 = make_name(img_name_1, 
patch_size_num_2, k) + patch_2 = array2img(patch_2) + save_dir_2 = os.path.join(save_root_2, img_name_2) + print(save_dir_2) + if patient_folder: + save_file(patch_2, os.path.join(save_root_patient_2, img_name_2)) + name_dir_2[save_dir_2] = img + save_file(patch_2, save_dir_2) + else: + pass + + patch_1 = array2img(patch_1) + if patient_folder: + save_file(patch_1, os.path.join(save_root_patient_1, img_name_1)) + name_dir_1[save_dir_1] = img + save_file(patch_1, save_dir_1) + else: + pass + pd.DataFrame.from_dict(name_dir_0, orient='index', columns=['origin path']).to_csv( + os.path.join(os.path.join(save_root, + str(patch_size_num_0)), class_name + '-' + str(patch_size_num_0) + '.csv') + ) + pd.DataFrame.from_dict(name_dir_1, orient='index', columns=['origin path']).to_csv( + os.path.join(os.path.join(save_root, + str(patch_size_num_1)), class_name + '-' + str(patch_size_num_1) + '.csv') + ) + pd.DataFrame.from_dict(name_dir_2, orient='index', columns=['origin path']).to_csv( + os.path.join(os.path.join(save_root, + str(patch_size_num_2)), class_name + '-' + str(patch_size_num_2) + '.csv') + ) + + +def read_and_convert(data_root, save_root, suffix=None, patient_folder=False, L=True, M=True, S=False): + # 一次处理只一个数据集, 每个数据集的处理方式可能有不同 + + # 读入所有数据 + + class_names = os.listdir(data_root) + + class_names = ['PAIP2019'] + # 接下来一行代码只在断点续传使用 + # class_names = class_names[class_names.index('CPTAC-LUAD') :] + + for class_name in class_names: + + svs_class_root = os.path.join(data_root, class_name) + svs_all_files = find_all_files(svs_class_root, suffix) + # 接下来一行代码只在断点续传使用 + # if class_name == 'CPTAC-LUAD': + # svs_all_files = svs_all_files[svs_all_files.index(r'E:\Puzzle_Tuning_Datasets\Raw\WSI\CPTAC-LUAD\LUAD\C3N-02141-27.svs') + 1:] + name_dir_3840 = {} + name_dir_0 = {} + name_dir_1 = {} + name_dir_2 = {} + for img in svs_all_files: + img_name = os.path.split(img)[1].split('.')[0] + SVS_cut_to_patch(img, save_root, patch_size, img_name, class_name, name_dir_3840, name_dir_0, name_dir_1, name_dir_2, + patient_folder, L=L, M=M, S=S) + + + +if __name__ == '__main__': + read_and_convert(r'I:\Puzzle_Tuning_Datasets\Raw', + r'X:\CPIA_WSI_no_sampling_no_rezising', + 'svs', + patient_folder=False, + L=True, M=True, S=False) + # fixme: X: doesn't take the picture + # fixed use image_data = np.asarray(f_image) + # cv2.imwrite(save_dir+suffix, image_data) + + +# 2023.5.1 E: CPTAC-(CCRCC CM HNSCC LSCC LUAD PDA SAR UCEC) Post-NAT-BRCA + + + + + + diff --git a/PuzzleTuning/dataprocessing/WSI_whole_cropping_counting.py b/PuzzleTuning/dataprocessing/WSI_whole_cropping_counting.py new file mode 100644 index 0000000000000000000000000000000000000000..b66bc98c9255af2d68f8ac9223e7676c3beaac8f --- /dev/null +++ b/PuzzleTuning/dataprocessing/WSI_whole_cropping_counting.py @@ -0,0 +1,398 @@ +""" +'JPG_cropping_960...' 
diff --git a/PuzzleTuning/dataprocessing/WSI_whole_cropping_counting.py b/PuzzleTuning/dataprocessing/WSI_whole_cropping_counting.py new file mode 100644 index 0000000000000000000000000000000000000000..b66bc98c9255af2d68f8ac9223e7676c3beaac8f --- /dev/null +++ b/PuzzleTuning/dataprocessing/WSI_whole_cropping_counting.py @@ -0,0 +1,398 @@
+"""
+'JPG_cropping_960...' ver: 23.6.2
+This code counts the dataset quantity of CPIA-WSI.
+It crops pathology images into patches, using average filtering to screen for the useful pieces, which are mostly red/purple.
+
+Specially modified version:
+maximizes the efficiency of cropping at different sizes.
+"""
+import os
+
+os.add_dll_directory(r"D:\chrome_download\github220901\openslide-win64\bin")
+# Note: on Windows, OpenSlide needs its DLL directory registered like this; also add OpenSlide to PATH
+import openslide
+import shutil
+import PIL.Image as Image
+import numpy as np
+import torch
+from tqdm import tqdm
+import cv2
+from torchvision import transforms
+from PIL import ImageFile
+import pandas as pd
+
+ImageFile.LOAD_TRUNCATED_IMAGES = True
+Image.MAX_IMAGE_PIXELS = None
+
+STANDARD_MPP = 0.4942
+patch_size = [(3840, 3840), (960, 960), (384, 384), (96, 96)]
+
+
+def save_file(f_image, save_dir, suffix='.jpg'):
+    """
+    Rename and save the image (the renaming table is written elsewhere)
+    """
+    filepath, _ = os.path.split(save_dir)
+    if not os.path.exists(filepath):
+        os.makedirs(filepath)
+    # f_image.save(save_dir + suffix)
+    # convert RGB (PIL order) to BGR before cv2.imwrite, which expects BGR
+    image_data = cv2.cvtColor(np.asarray(f_image), cv2.COLOR_RGB2BGR)
+    cv2.imwrite(save_dir + suffix, image_data)
+
+
+def make_and_clear_path(file_pack_path):
+    # note: despite the name, nothing is cleared here; the directory is only created
+    if not os.path.exists(file_pack_path):
+        os.makedirs(file_pack_path)
+
+
+def find_all_files(root, suffix=None):
+    """
+    Return a list of file paths ending with a specific suffix
+    """
+    res = []
+    if type(suffix) is tuple or type(suffix) is list:
+        for root, _, files in os.walk(root):
+            for f in files:
+                if suffix is not None:
+                    status = 0
+                    for i in suffix:
+                        if not f.endswith(i):
+                            pass
+                        else:
+                            status = 1
+                            break
+                    if status == 0:
+                        continue
+                res.append(os.path.join(root, f))
+        return res
+
+    elif type(suffix) is str or suffix is None:
+        for root, _, files in os.walk(root):
+            for f in files:
+                if suffix is not None and not f.endswith(suffix):
+                    continue
+                res.append(os.path.join(root, f))
+        return res
+
+    else:
+        print('type of suffix is not legal:', type(suffix))
+        return -1
+
+
+def convert_to_npy(a_data_path, patch_size=(960, 960)):
+    patch_size = to_2tuple(patch_size)
+
+    # crop the image to an integer multiple of patch_size and return it as a numpy array
+    img = Image.open(a_data_path)
+    w, h = img.size
+    factor = min(w // patch_size[0], h // patch_size[1])
+    numpy_img = img.crop([0, 0, factor * patch_size[0], factor * patch_size[1]])
+    numpy_img = np.array(numpy_img)
+
+    return numpy_img
+
+
+def convert_to_npy_no_opening(patch, patch_size=(960, 960)):
+    patch_size = to_2tuple(patch_size)
+    img = patch
+    w, h = img.size
+    factor = min(w // patch_size[0], h // patch_size[1])
+    numpy_img = img.crop([0, 0, factor * patch_size[0], factor * patch_size[1]])
+    numpy_img = np.array(numpy_img)
+
+    return numpy_img
+
+
+class to_patch:
+    """
+    Split an image into patches, each patch with the size of patch_size
+    """
+
+    def __init__(self, patch_size=(16, 16)):
+        patch_size = to_2tuple(patch_size)
+        self.patch_h = patch_size[0]
+        self.patch_w = patch_size[1]
+
+    def __call__(self, x):
+        x = torch.tensor(x)
+        x = x.permute(2, 0, 1)
+        c, h, w = x.shape
+        # print(x.shape)
+        # assert h // self.patch_h == h / self.patch_h and w // self.patch_w == w / self.patch_w
+        num_patches = (h // self.patch_h) * (w // self.patch_w)
+
+        # center-crop to an integer multiple of the patch size
+        h_1 = (h // self.patch_h) * self.patch_h
+        w_1 = (w // self.patch_w) * self.patch_w
+        x = x[:, ((h - h_1) // 2):((h - h_1) // 2 + h_1), ((w - w_1) // 2):((w - w_1) // 2 + w_1)]
+        # patch encoding
+        # (c, h, w)
+        # -> (c, h // self.patch_h, self.patch_h, w // self.patch_w, self.patch_w)
+        # -> (h // self.patch_h, w // self.patch_w, self.patch_h, self.patch_w, c)
+        # -> (n_patches, patch_size^2*c)
+        patches = x.view(
+            c,
+            h // self.patch_h,
+            self.patch_h,
+            w // self.patch_w,
+            self.patch_w).permute(1, 3, 2, 4, 0).reshape(num_patches, -1)  # it can also be used in Transformer encoding
+
+        # patch split
+        # (n_patches, patch_size^2*c)
+        # -> (num_patches, self.patch_h, self.patch_w, c)
+        # -> (num_patches, c, self.patch_h, self.patch_w)
+        patches = patches.view(num_patches,
+                               self.patch_h,
+                               self.patch_w,
+                               c).permute(0, 3, 1, 2)
+
+        return patches
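+
+# A quick, illustrative shape check for to_patch (assuming an H x W x C uint8
+# array, as produced by convert_to_npy):
+#
+#   x = np.zeros((960, 960, 3), dtype=np.uint8)
+#   patches = to_patch((384, 384))(x)
+#   print(patches.shape)  # torch.Size([4, 3, 384, 384]); the 960 side is center-cropped to 768
+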
+def to_2tuple(input):
+    if type(input) is tuple or type(input) is list:
+        if len(input) >= 2:
+            return (input[0], input[1])
+        elif len(input) == 1:
+            return (input[0], input[0])
+        else:
+            print('cannot handle an empty tuple/list')
+    elif type(input) is int:
+        return (input, input)
+    else:
+        print('cannot handle', type(input))
+        raise TypeError('cannot handle ' + str(type(input)))
+
+
+def pick_patch(patch):
+    """
+    Keep only patches with tissue-like colour
+    (stained tissue is red/purple; background is grey/white)
+    :param patch: H x W x C numpy array
+    :return: True if the patch should be kept
+    """
+    patch = array2img(patch)
+    img_single = patch.resize((1, 1), Image.ANTIALIAS)
+    r, g, b = img_single.getpixel((0, 0))
+    if r - g < 30:
+        return False
+    else:
+        return True
+
+
+def array2img(patch):
+    img = Image.fromarray(patch.astype('uint8')).convert('RGB')
+    return img
+
+
+def make_name(former_name, patch_size, patch_num):
+    """
+    Make sure every name encodes the horizontal x and vertical y position on the
+    original image, with stride equal to the patch's own patch_size
+    :param former_name: name of the parent patch, e.g. '<slide>-<size>-<x>-<y>'
+    :param patch_size: size of the current (child) patch
+    :param patch_num: 1-indexed number of the child patch within its parent
+    """
+    former_patch_size = int(former_name.split('-')[-3])
+    former_x = int(former_name.split('-')[-2])
+    former_y = int(former_name.split('-')[-1])
+    img_real_name = former_name[::-1].split('-', 3)[-1][::-1]
+
+    ratio = int(former_patch_size / patch_size)
+    x = patch_num % ratio if patch_num % ratio != 0 else ratio
+    x = x - 1  # every coordinate starts at 0
+    x = former_x * ratio + x
+
+    y = patch_num // ratio if patch_num % ratio != 0 else patch_num // ratio - 1
+    y = former_y * ratio + y
+
+    img_name = img_real_name + '-' + str(patch_size) + '-' + str(x) + '-' + str(y)
+    return img_name
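+
+# A worked example (illustrative) of the coordinate arithmetic in make_name:
+# for former_name 'slide1-3840-2-3' (a 3840-scale patch at x=2, y=3) split into
+# 960-scale children (ratio = 4), the 1-indexed patch_num maps to:
+#   patch_num=1 -> 'slide1-960-8-12'   (x = 2*4 + 0, y = 3*4 + 0)
+#   patch_num=4 -> 'slide1-960-11-12'  (x = 2*4 + 3, end of the first row)
+#   patch_num=5 -> 'slide1-960-8-13'   (start of the next row down)
+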
+def SVS_cut_to_patch(img, save_root,
+                     patch_size,
+                     class_name,
+                     patient_folder=False,
+                     L=True, M=True, S=False):
+    global num_XL
+
+    img_name = os.path.split(img)[1].split('.')[0]
+    slide = openslide.open_slide(img)
+    try:
+        MPP = slide.properties[openslide.PROPERTY_NAME_MPP_X]
+        resize_ratio = STANDARD_MPP / float(MPP)
+
+        if 1.1 > resize_ratio > 0.9:
+            patch_size_num_0 = patch_size[0][0]
+        else:
+            patch_size_num_0 = int(patch_size[0][0] * resize_ratio)
+
+        save_root_0 = os.path.join(os.path.join(save_root, str(patch_size[0][0])), class_name + '-' + str(patch_size[0][0]))
+        make_and_clear_path(save_root_0)
+        w, h = slide.level_dimensions[0]
+        for i in range(1, w // patch_size_num_0 - 1):
+
+            for j in range(1, h // patch_size_num_0 - 1):
+
+                patch = slide.read_region((i * patch_size_num_0, j * patch_size_num_0), 0, (patch_size_num_0, patch_size_num_0))
+                patch = patch.convert('RGB')
+                # print('finish id:%d image' % image_list.index(id))
+                if not 1.1 > resize_ratio > 0.9:
+                    patch = patch.resize(patch_size[0], Image.ANTIALIAS)  # resize to 3840 x 3840, bringing every slide to the standard MPP
+                # save_file(patch, os.path.join(save_root_0, img_name + '-' + str((i + 1) * (j + 1))))
+                img_single = patch.resize((1, 1), Image.ANTIALIAS)
+                r, g, b = img_single.getpixel((0, 0))
+                if r < 220 and g < 220 and b < 220 and r > 100 and b > 30 and r > g + 20:
+                    num_XL += 1
+                    # save_file(patch, os.path.join(save_root_0, img_name + '-' + str(patch_size[0][0]) + '-' + str(i) + '-' + str(j)))
+                    current_img_name = img_name + '-' + str(patch_size[0][0]) + '-' + str(i) + '-' + str(j)
+
+                    cut_to_patch(patch, current_img_name, save_root,
+                                 patch_size[1], patch_size[2], patch_size[3],
+                                 img_name, class_name,
+                                 patient_folder=patient_folder,
+                                 L=L, M=M, S=S)
+                else:
+                    continue
+
+    except Exception as e:
+        print(e)
+
+
+def cut_to_patch(patch,
+                 current_img_name,
+                 save_root,
+                 patch_size_0, patch_size_1, patch_size_2,
+                 img_name, class_name,
+                 patient_folder=True,
+                 L=True, M=True, S=True
+                 ):
+    global num_L, num_M, num_S
+    numpy_img = convert_to_npy_no_opening(patch)
+    patch_size_num_0 = patch_size_0[0]
+    patch_size_num_1 = patch_size_1[0]
+    patch_size_num_2 = patch_size_2[0]
+
+    img_split_0 = to_patch(patch_size_0)
+    img_patches_0 = img_split_0(numpy_img)
+
+    img_split_1 = to_patch(patch_size_1)
+    img_patches_1 = img_split_1(numpy_img)
+    i = 0
+    j = 0
+    if L:
+        # in most cases we need the L scale, which is 960 x 960
+        for patch in img_patches_0:
+            i = i + 1
+            patch = patch.permute(1, 2, 0)
+            patch = patch.numpy()
+            if pick_patch(patch):
+                img_name_0 = make_name(current_img_name, patch_size_num_0, i)
+                num_L += 1
+            else:
+                pass
+    if M:
+        # in most cases we need the M scale, which is 384 x 384
+        # if M is False then S must be False
+        for patch_1 in img_patches_1:
+            # convert the image into numpy
+            j = j + 1
+            patch_1 = patch_1.permute(1, 2, 0)
+            patch_1 = patch_1.numpy()
+            if pick_patch(patch_1):
+                # count the 384 x 384 image
+                num_M += 1
+                if S:
+                    k = 0
+                    img_split_2 = to_patch(patch_size_2)
+                    img_patches_2 = img_split_2(patch_1)
+                    for patch_2 in img_patches_2:
+                        k = k + 1
+                        patch_2 = patch_2.permute(1, 2, 0)
+                        patch_2 = patch_2.numpy()
+                        if pick_patch(patch_2):
+                            if k % 10 == 0:  # the S scale is 1/10 subsampled
+                                num_S += 1
+
+                        else:
+                            pass
+            else:
+                pass
+
+
+def read_and_convert(data_root, save_root, suffix=None, L=True, M=True, S=True):
+    global num_XL, num_L, num_M, num_S
+    dataset_list = []
+    # class_names = os.listdir(data_root)
+    class_names = ['tif']
+    # the next line is only used to resume an interrupted run
+    # class_names = class_names[class_names.index('CPTAC-UCEC') :]
+
+    for class_name in class_names:
+
+        svs_class_root = os.path.join(data_root, class_name)
+        svs_all_files = find_all_files(svs_class_root, suffix)
+
+        num_XL = 0
+        num_L = 0
+        num_M = 0
+        num_S = 0
+        for seq in tqdm(range(len(svs_all_files))):
+            img = svs_all_files[seq]
+            SVS_cut_to_patch(img, save_root, patch_size, class_name,
+                             patient_folder=True, L=L, M=M, S=S)
+            print({'dataset_name': str(class_name),
+                   'num_XL': int(num_XL),
+                   'num_L': int(num_L),
+                   'num_M': int(num_M),
+                   'num_S': int(num_S)})
+
+        dataset_list.append(
+            {'dataset_name': str(class_name),
+             'num_XL': int(num_XL),
+             'num_L': int(num_L),
+             'num_M': int(num_M),
+             'num_S': int(num_S)}
+        )
+
+    print(dataset_list)
+
+
+if __name__ == '__main__':
+    read_and_convert(r'F:\MIL_datasets\CAMELYON16\training',
+                     r'X:\CPIA_WSI_no_sampling_no_rezising',
+                     'tif',
+                     L=True, M=True, S=True)
+    # fixme: drive X: didn't accept the PIL save
+    # fixed: use image_data = np.asarray(f_image)
+    #        cv2.imwrite(save_dir + suffix, image_data)
diff --git a/PuzzleTuning/dataprocessing/bad_data_killer.py b/PuzzleTuning/dataprocessing/bad_data_killer.py new file mode 100644 index 0000000000000000000000000000000000000000..3a59ada71a9d90c7f398bb5e4e746623a52efb6b --- /dev/null +++
b/PuzzleTuning/dataprocessing/bad_data_killer.py @@ -0,0 +1,52 @@ +""" +datacheck via dataloader Script ver: Feb 23th 21:00 +loop the data and check if they are all cool +""" +import time +import torch +from torch import nn, optim +from torch.utils.data import DataLoader +from torchvision import models, datasets, transforms +import torch.nn.functional as func +from torchsummary import summary +import matplotlib.pyplot as plt +from torchvision import models +import ssl +import os + +ssl._create_default_https_context = ssl._create_unverified_context + + +def data_loop(device, train_loader, check_minibatch=100): + model_time = time.time() + prev_time = model_time + index = 0 + + for data, label in train_loader: + data = data.to(device) + + # at the checking time now + if index % check_minibatch == check_minibatch - 1: + check_index = index // check_minibatch + 1 + now_time = time.time() + gap_time = now_time - prev_time + prev_time = now_time + print('index of ' + str(check_minibatch) + ' minibatch:', check_index, ' time used:', gap_time) + + index += 1 + + print('all checked, time used:', time.time() - model_time) + + +if __name__ == '__main__': + data_path = r'/root/autodl-tmp/datasets/L' + edge_size = 224 + transform_train = transforms.Compose([transforms.Resize([edge_size, edge_size]),transforms.ToTensor()]) + + train_data = datasets.ImageFolder(data_path, transform=transform_train) + train_loader = DataLoader(train_data, batch_size=500, shuffle=False, num_workers=32) + + os.environ['CUDA_VISIBLE_DEVICES'] = '0' + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + + data_loop(device, train_loader) diff --git a/PuzzleTuning/dataprocessing/data_split.py b/PuzzleTuning/dataprocessing/data_split.py new file mode 100644 index 0000000000000000000000000000000000000000..feebb73a7ae31a86021ae17a4409aeeb2ede6a72 --- /dev/null +++ b/PuzzleTuning/dataprocessing/data_split.py @@ -0,0 +1,215 @@ +""" +dataset divide script ver: Jan 9th 15:30 official release + +ref:https://zhuanlan.zhihu.com/p/199238910 +""" +import os +import random +import shutil +from shutil import copy2 +from multiprocessing import Pool, cpu_count + + +def del_file(filepath): + """ + Delete all files or folders in a directory + :param filepath: path of file + :return: + """ + del_list = os.listdir(filepath) + for f in del_list: + file_path = os.path.join(filepath, f) + if os.path.isfile(file_path): + os.remove(file_path) + elif os.path.isdir(file_path): + shutil.rmtree(file_path) + + +def make_and_clear_path(file_pack_path): + if not os.path.exists(file_pack_path): + os.makedirs(file_pack_path) + del_file(file_pack_path) + + +def a_dataset_split(src_data_folder, target_data_folder, class_name, train_scale, val_scale, test_scale, com_num=None): + current_class_data_path = os.path.join(src_data_folder, class_name) + current_all_data = os.listdir(current_class_data_path) + + current_data_length = len(current_all_data) + current_data_index_list = list(range(current_data_length)) + random.shuffle(current_data_index_list) + + train_folder = os.path.join(os.path.join(target_data_folder, 'train'), class_name) + val_folder = os.path.join(os.path.join(target_data_folder, 'val'), class_name) + test_folder = os.path.join(os.path.join(target_data_folder, 'test'), class_name) + + train_stop_flag = current_data_length * train_scale + val_stop_flag = current_data_length * (train_scale + val_scale) + current_idx = 0 + train_num = 0 + val_num = 0 + test_num = 0 + for i in current_data_index_list: + src_img_path = 
os.path.join(current_class_data_path, current_all_data[i]) + if current_idx <= train_stop_flag: + copy2(src_img_path, train_folder) + # print("{} copied to {}".format(src_img_path, train_folder)) + train_num = train_num + 1 + + elif (current_idx > train_stop_flag) and (current_idx <= val_stop_flag): + copy2(src_img_path, val_folder) + # print("{} copied to{}".format(src_img_path, val_folder)) + val_num = val_num + 1 + + else: + copy2(src_img_path, test_folder) + # print("{} copied to {}".format(src_img_path, test_folder)) + test_num = test_num + 1 + + current_idx = current_idx + 1 + + print("*********************************{}*************************************".format(class_name) + '\n' + + "{} class has been divided into {}:{}:{}, a total of {} images".format(class_name, train_scale, val_scale, + test_scale, + current_data_length) + + '\n' + "Train set{}: {} pics".format( + train_folder, + train_num) + + '\n' + "Validation set{}: {} pics".format(val_folder, val_num) + '\n' + "Test set{}: {} pics".format( + test_folder, test_num) + + '\n') + + if com_num is not None: + print('processed class idx:', com_num) + + +def data_set_split(src_data_folder, target_data_folder='./dataset', train_scale=0.8, val_scale=0.2, test_scale=0.0, + Parallel_processing=False): + """ + Read source data folder, generate divided folders as 'train', 'val' and 'test' + :param src_data_folder: source folder E:/biye/gogogo/note_book/torch_note/data/utils_test/data_split/src_data + :param target_data_folder: target folder E:/biye/gogogo/note_book/torch_note/data/utils_test/data_split/target_data + :param train_scale: train set ratio + :param val_scale: validation set ratio + :param test_scale: test set ratio + + :param Parallel_processing: whether to process in parallel + + :return: + """ + make_and_clear_path(target_data_folder) + print("Begin dataset division") + class_names = os.listdir(src_data_folder) + # Create folder in the target directory + split_names = ['train', 'val', 'test'] + for split_name in split_names: + split_path = os.path.join(target_data_folder, split_name) + # Then create category folder under the split_path directory + for class_name in class_names: + class_split_path = os.path.join(split_path, class_name) + os.makedirs(class_split_path) + + if Parallel_processing: + # Create process pool + tasks_num = len(class_names) + process_pool = Pool(min(cpu_count() - 2, tasks_num)) # Number of parallels, leave at least 2 cores + + com_num = 0 + print("start processing" + str(tasks_num) + " files by multi-process") + # Schedule tasks + for class_name in class_names: + # Pool.apply_async(target to be called,(parameter tuple passed to the target,)) + # Use free process to call the target during each loop + com_num += 1 + args = (src_data_folder, target_data_folder, class_name, train_scale, val_scale, test_scale, com_num) + process_pool.apply_async(a_dataset_split, args) + + process_pool.close() # Close the process pool, process pool will no longer receive new requests once it is closed. + process_pool.join() # Wait till all process in process pool finished, must be placed after the 'close' statement + + else: + # Divide the dataset according to the proportion, and copy the data image + # Traverse by category + for class_name in class_names: + a_dataset_split(src_data_folder, target_data_folder, class_name, train_scale, val_scale, test_scale) + + +def k_fold_split(src_data_folder, target_data_folder='./kfold', k=5): + """ + Read the source data folder, generate divided folders as 'train', 'val'. 
+ + :param src_data_folder: organized imagenet format folders that need to be divided by k-folding + :param target_data_folder: large target folder with k folders generated inside, k folders are in imagenet format with train and val inside + :param k: the number of divided folds + + :return: + """ + make_and_clear_path(target_data_folder) + print("Begin dataset division") + class_names = os.listdir(src_data_folder) # Get category name + + # Divide the dataset for each category according to the proportion, and copy and distribute the data images + for class_name in class_names: # Classification traversal first + + current_class_data_path = os.path.join(src_data_folder, class_name) + current_class_data_names = os.listdir(current_class_data_path) + + current_data_length = len(current_class_data_names) + random.shuffle(current_class_data_names) + + # Divide data + split_num = current_data_length // k + # Put a packet for evert split_num data, and if there are k+1 packets, the last packet can only have k-1 data at most + temp_split_pack = [current_class_data_names[i:i + split_num] for i in range(0, current_data_length, split_num)] + fold_name_pack = [temp_split_pack[i] for i in range(0, k)] # Get the first k packets + if len( + temp_split_pack) > k: # If it can’t be divided equally at the end, the last one will have one more pack, and put the contents into different packs in turn + for pack_idx, name in enumerate(temp_split_pack[-1]): # The extra pack have at most k-1 data + fold_name_pack[pack_idx].append(name) + + print("{} class is divided into {} cross-validation, a total of {} images".format(class_name, k, + current_data_length)) + + for p in range(1, k + 1): # For each fold, start from 1 + # Folder + train_folder = os.path.join(target_data_folder, 'fold_' + str(p), 'train', class_name) + val_folder = os.path.join(target_data_folder, 'fold_' + str(p), 'val', class_name) + os.makedirs(train_folder) + os.makedirs(val_folder) + + pack_idx = p - 1 # Use the current fold of data as val set, and use the rest as train set + + # Copy divided data + train_num = 0 + val_num = 0 + + for j in range(k): + if j == pack_idx: + for i in fold_name_pack[j]: + src_img_path = os.path.join(current_class_data_path, i) + copy2(src_img_path, val_folder) + val_num += 1 + # print("{} has copied to {}".format(src_img_path, val_folder)) + else: + for i in fold_name_pack[j]: + src_img_path = os.path.join(current_class_data_path, i) + copy2(src_img_path, train_folder) + train_num += 1 + # print("{} has copied to {}".format(src_img_path, train_folder)) + print("fold {}: class:{} train num: {}".format(p, class_name, train_num)) + print("fold {}: class:{} val num: {}".format(p, class_name, val_num)) + + +if __name__ == '__main__': + # step1: create train_val and test dataset + src_data_folder = r'C:\Users\admin\Desktop\ROSE_5k' + target_data_folder1 = r'C:\Users\admin\Desktop\ROSE_5000_train_val' # _5fold + data_set_split(src_data_folder, target_data_folder1, train_scale=0.8, val_scale=0.0, test_scale=0.2, + Parallel_processing=False) + + # step2: create 5 fold dataset + src_data_folder = os.path.join(target_data_folder1, 'train') + target_data_folder2 = r'C:\Users\admin\Desktop\ROSE_5000_5fold' # + k_fold_split(src_data_folder, target_data_folder2, k=5) + + # step3: move the test dataset into file folder of the 5 fold dataset diff --git a/PuzzleTuning/dataprocessing/database_generator.py b/PuzzleTuning/dataprocessing/database_generator.py new file mode 100644 index 
0000000000000000000000000000000000000000..59ef9644852d0ffe79ce540af7042c6d363b0683 --- /dev/null +++ b/PuzzleTuning/dataprocessing/database_generator.py @@ -0,0 +1,164 @@ +""" +Organize the data to ensure that all data is in jpg format ver: Jan 9th 15:30 official release + +""" +import os +import re +import csv +import shutil +import pandas as pd +from PIL import Image +from tqdm import tqdm +import torchvision.transforms +from PIL import ImageFile + +ImageFile.LOAD_TRUNCATED_IMAGES = True + + +def del_file(filepath): + """ + Delete all files and folders in one directory + :param filepath: file path + :return: + """ + del_list = os.listdir(filepath) + for f in del_list: + file_path = os.path.join(filepath, f) + if os.path.isfile(file_path): + os.remove(file_path) + elif os.path.isdir(file_path): + shutil.rmtree(file_path) + + +def make_and_clear_path(file_pack_path): + if not os.path.exists(file_pack_path): + os.makedirs(file_pack_path) + del_file(file_pack_path) + + +def find_all_files(root, suffix=None): + """ + Return a list of file paths ended with specific suffix + """ + res = [] + for root, _, files in os.walk(root): + for f in files: + if suffix is not None and not f.endswith(suffix): + continue + res.append(os.path.join(root, f)) + return res + + +def read_file(f_dir): + """ + Read a file and convert it into numpy format + """ + f_image = Image.open(f_dir) + return f_image + + +def change_shape(image, corp_x=2400, corp_y=1800, f_x=1390, f_y=1038): + """ + Resize the image into x*y + """ + if image.size[0] > corp_x or image.size[1] > corp_y: + # Generate an object of CenterCrop class to crop the image from the center into corp_x*corp_y + crop_obj = torchvision.transforms.CenterCrop((corp_y, corp_x)) + image = crop_obj(image) + # print(image.size[0], image.size[1]) + + image.thumbnail((f_x, f_y), Image.ANTIALIAS) + return image + + +def save_file(f_image, save_dir, suffix='.jpg'): + """ + Save and rename the images, generate the renamed table + """ + filepath, _ = os.path.split(save_dir) + if not os.path.exists(filepath): + os.makedirs(filepath) + f_image.save(save_dir + suffix) + + +def PC_to_stander(root_from=r'C:\Users\admin\Desktop\dataset\PC', + root_positive=r'C:\Users\admin\Desktop\jpg_dataset\P', + root_negative=r'C:\Users\admin\Desktop\jpg_dataset\N', corp_x=2400, corp_y=1800, f_x=1390, f_y=1038): + root_target, _ = os.path.split(root_positive) + make_and_clear_path(root_target) + + f_dir_list = find_all_files(root=root_from, suffix='.jpg') + # print(f_dir_list) + + name_dict = {} # Save the new and old names + old_size_type = [] + size_type = [] # Record all different image sizes (after reshape) + + for seq in tqdm(range(len(f_dir_list))): + f_dir = f_dir_list[seq] + + if '非癌' in f_dir or '阴性' in f_dir or '良性' in f_dir: + root_target = root_negative + else: + root_target = root_positive + + f_image = read_file(f_dir) + + size = (f_image.size[0], f_image.size[1]) + if size not in old_size_type: + old_size_type.append(size) + + f_image = change_shape(f_image, corp_x=corp_x, corp_y=corp_y, f_x=f_x, f_y=f_y) + + size = (f_image.size[0], f_image.size[1]) + if size not in size_type: + size_type.append(size) + + save_dir = os.path.join(root_target, str(seq + 1)) # Set save directory + name_dict[save_dir] = f_dir + + save_file(f_image, save_dir) + + print('old size type:', old_size_type) + print('size type: ', size_type) + + root_target, _ = os.path.split(root_positive) + pd.DataFrame.from_dict(name_dict, orient='index', columns=['origin path']).to_csv( + os.path.join(root_target, 
'name_dict.csv')) + + +def trans_csv_folder_to_imagefoder(target_path=r'C:\Users\admin\Desktop\MRAS_SEED_dataset', + original_path=r'C:\Users\admin\Desktop\dataset\MARS_SEED_Dataset\train\train_org_image', + csv_path=r'C:\Users\admin\Desktop\dataset\MARS_SEED_Dataset\train\train_label.csv'): + """ + Original data format: a folder with image inside + a csv file with header which has the name and category of every image. + Process original dataset and get data packet in image folder format + + :param target_path: the path of target image folder + :param original_path: The folder with images + :param csv_path: A csv file with header and the name and category of each image + """ + idx = -1 + with open(csv_path, "rt", encoding="utf-8") as csvfile: + reader = csv.reader(csvfile) + rows = [row for row in reader] + make_and_clear_path(target_path) # Clear target_path + for row in tqdm(rows): + idx += 1 + if idx == 0: # Skip the first header + continue + item_path = os.path.join(original_path, row[0]) + if os.path.exists(os.path.join(target_path, row[1])): + shutil.copy(item_path, os.path.join(target_path, row[1])) + else: + os.makedirs(os.path.join(target_path, row[1])) + shutil.copy(item_path, os.path.join(target_path, row[1])) + + print('total num:', idx) + + +if __name__ == '__main__': + PC_to_stander(root_from=r'../Desktop/ROSE_2112', + root_positive=r'../Desktop/jpg_dataset/Positive', + root_negative=r'../Desktop/jpg_dataset/Negative', corp_x=5280, corp_y=3956, f_x=1390, + f_y=1038) diff --git a/PuzzleTuning/dataprocessing/deployment_dataset_INF.py b/PuzzleTuning/dataprocessing/deployment_dataset_INF.py new file mode 100644 index 0000000000000000000000000000000000000000..8174fec021b9d2b70bf647d6b02ec26bcd7f3de5 --- /dev/null +++ b/PuzzleTuning/dataprocessing/deployment_dataset_INF.py @@ -0,0 +1,286 @@ +""" +self supervise dataset AI-inferance Script ver: Aug 25th 22:00 + +""" +import argparse +import csv +import os +import shutil +import sys + +import cv2 +import numpy as np +import torch +import torch.nn as nn +from PIL import Image +from tqdm import tqdm + +sys.path.append("..") +from Backbone.getmodel import get_model +from utils.tools import find_all_files +from utils.data_augmentation import data_augmentation + + +def trans_csv_folder_to_imagefoder(target_path=r'C:\Users\admin\Desktop\MRAS_SEED_dataset', + original_path=r'C:\Users\admin\Desktop\dataset\MARS_SEED_Dataset\train\train_org_image', + csv_path=r'C:\Users\admin\Desktop\dataset\MARS_SEED_Dataset\train\train_label.csv'): + """ + Original data format: a folder with image inside + a csv file with header which has the name and category of every image. 
+    Process the original dataset and produce a data packet in image-folder format
+
+    :param target_path: the path of the target image folder
+    :param original_path: the folder with images
+    :param csv_path: a csv file with a header and the name and category of each image
+    """
+    idx = -1
+    with open(csv_path, "rt", encoding="utf-8") as csvfile:
+        reader = csv.reader(csvfile)
+        rows = [row for row in reader]
+
+        if not os.path.exists(target_path):
+            os.makedirs(target_path)
+
+        for row in tqdm(rows):
+            idx += 1
+
+            item_path = row[0]
+            if os.path.exists(os.path.join(target_path, row[1])):
+                shutil.copy(item_path, os.path.join(target_path, row[1]))
+            else:
+                os.makedirs(os.path.join(target_path, row[1]))
+                shutil.copy(item_path, os.path.join(target_path, row[1]))
+
+    print('total num:', idx)
+
+
+class PILImageTransform:
+    def __init__(self):
+        pass
+
+    def __call__(self, image):
+        # transform a cv2 BGR image into a PIL RGB image
+        b, g, r = cv2.split(image)
+        image = cv2.merge([r, g, b])
+        return Image.fromarray(np.uint8(image))
+
+
+class Front_Background_Dataset(torch.utils.data.Dataset):
+    def __init__(self, input_root, data_transforms=None, edge_size=384, suffix='.jpg'):
+
+        super().__init__()
+
+        self.data_root = input_root
+
+        # get files
+        self.input_ids = sorted(find_all_files(self.data_root, suffix=suffix))
+
+        # to PIL
+        self.PIL_Transform = PILImageTransform()
+
+        # get data augmentation and transform
+        if data_transforms is not None:
+            self.transform = data_transforms
+        else:
+            # the torchvision transforms module is not imported at the top of this file
+            from torchvision import transforms
+            self.transform = transforms.Compose([transforms.Resize(edge_size), transforms.ToTensor()])
+
+    def __len__(self):
+        return len(self.input_ids)
+
+    def __getitem__(self, idx):
+        # get data path
+        imageName = self.input_ids[idx]
+        # get image id
+        imageID = imageName
+        # file name: os.path.split(imageName)[-1].split('.')[0]
+
+        # get data
+        # cv2 gives 0-255 HWC; in the ToTensor step it is transformed to CHW (PIL gives 0-1 HWC)
+        image = np.array(cv2.imread(imageName), dtype=np.float32)
+
+        image = self.transform(self.PIL_Transform(image))
+
+        return image, imageID
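+
+# An illustrative minimal use of the dataset above (mirrors what main() builds;
+# the path is a placeholder and square source images are assumed):
+#
+#   dataset = Front_Background_Dataset(r'/path/to/patches', edge_size=384, suffix='.jpg')
+#   loader = torch.utils.data.DataLoader(dataset, batch_size=4, shuffle=False)
+#   images, ids = next(iter(loader))  # images: (4, 3, 384, 384) float tensor
+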
+def inferance(model, dataloader, record_dir, class_names=['0', '1'], result_csv_name='inferance.csv', device='cuda'):
+    if not os.path.exists(record_dir):
+        os.makedirs(record_dir)
+
+    model.eval()
+    print('Inference')
+    print('-' * 10)
+
+    check_idx = 0
+
+    with open(os.path.join(record_dir, result_csv_name), 'w') as f_log:
+        # Iterate over data.
+        for images, imageIDs in dataloader:
+            images = images.to(device)
+
+            # forward
+            outputs = model(images)
+            confidence, preds = torch.max(outputs, 1)
+
+            pred_labels = preds.cpu().numpy()
+
+            for output_idx in range(len(pred_labels)):
+                f_log.write(str(imageIDs[output_idx]) + ', ' + str(class_names[pred_labels[output_idx]]) + ', \n')
+                check_idx += 1
+
+    print(str(check_idx) + ' samples are all recorded')
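+
+# Each line of the result csv reads: <image path>, <predicted class name>,
+# e.g. '/data/patches/img_001.jpg, 1,' (illustrative row; paths come from find_all_files)
+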
+def main(args):
+    if args.paint:
+        # use the Agg backend, so nothing is painted in the front desk
+        import matplotlib
+        matplotlib.use('Agg')
+
+    # PATH
+    model_idx = args.model_idx
+    dataroot = args.dataroot
+    save_model_path = os.path.join(args.model_path, 'CLS_' + model_idx + '.pth')
+    record_dir = args.record_dir
+    if not os.path.exists(record_dir):
+        os.mkdir(record_dir)
+
+    gpu_idx = args.gpu_idx
+
+    drop_rate = args.drop_rate
+    attn_drop_rate = args.attn_drop_rate
+    drop_path_rate = args.drop_path_rate
+    use_cls_token = False if args.cls_token_off else True
+    use_pos_embedding = False if args.pos_embedding_off else True
+    use_att_module = None if args.att_module == 'None' else args.att_module
+    edge_size = args.edge_size
+    batch_size = args.batch_size
+
+    data_transforms = data_augmentation(data_augmentation_mode=args.data_augmentation_mode, edge_size=edge_size)
+
+    inf_dataset = Front_Background_Dataset(dataroot, data_transforms=data_transforms['val'], edge_size=edge_size,
+                                           suffix='.jpg')
+    dataloader = torch.utils.data.DataLoader(inf_dataset, batch_size=batch_size, num_workers=2, shuffle=False)
+
+    class_names = ['0', '1']  # 0 for empty
+
+    # Get model
+    pretrained_backbone = False
+    if args.num_classes == 0:
+        print("class_names:", class_names)
+        num_classes = len(class_names)
+    else:
+        if len(class_names) == args.num_classes:
+            print("class_names:", class_names)
+            num_classes = args.num_classes
+        else:
+            print('the classification number of the model mismatches the dataset requirement of:', len(class_names))
+            return -1
+
+    model = get_model(num_classes, edge_size, model_idx, drop_rate, attn_drop_rate, drop_path_rate,
+                      pretrained_backbone, use_cls_token, use_pos_embedding, use_att_module)
+
+    # todo: this model structure is formed under only one condition
+    if gpu_idx == -1:
+        if torch.cuda.device_count() > 1:
+            print("Use", torch.cuda.device_count(), "GPUs!")
+            # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
+            model = nn.DataParallel(model)
+        else:
+            print("we don't have more GPU indices here, trying gpu_idx=0")
+            try:
+                # setting 0 for: only card idx 0 is sighted for this code
+                os.environ['CUDA_VISIBLE_DEVICES'] = '0'
+            except:
+                print("a GPU distributing error occurred, using CPU instead")
+
+    else:
+        # Decide which device we want to run on
+        try:
+            # setting k for: only card idx k is sighted for this code
+            os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_idx)
+        except:
+            print("we don't have that GPU index here, trying gpu_idx=0")
+            try:
+                # setting 0 for: only card idx 0 is sighted for this code
+                os.environ['CUDA_VISIBLE_DEVICES'] = '0'
+            except:
+                print("a GPU distributing error occurred, using CPU instead")
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # single card for test
+
+    try:
+        model.load_state_dict(torch.load(save_model_path), False)
+    except:
+        print('model loading error')
+    else:
+        print('model loaded')
+
+    model.to(device)
+
+    # run inference on the selected device
+    inferance(model, dataloader, record_dir, class_names=class_names, result_csv_name='inferance.csv', device=device)
+def get_args_parser():
+    parser = argparse.ArgumentParser(description='PyTorch ImageNet INF')
+
+    # Model Name or index
+    parser.add_argument('--model_idx', default='Hybrid2_384_401_testsample', type=str, help='Model Name or index')
+
+    # MIL Stripe
+    parser.add_argument('--MIL_Stripe', action='store_true', help='MIL_Stripe')
+
+    # drop_rate, attn_drop_rate, drop_path_rate
+    parser.add_argument('--drop_rate', default=0.0, type=float, help='dropout rate, default 0.0')
+    parser.add_argument('--attn_drop_rate', default=0.0, type=float, help='dropout rate after attention, default 0.0')
+    parser.add_argument('--drop_path_rate', default=0.0, type=float, help='drop path for stochastic depth, default 0.0')
+
+    # Ablation studies for MSHT
+    parser.add_argument('--cls_token_off', action='store_true', help='disable cls_token in the model structure')
+    parser.add_argument('--pos_embedding_off', action='store_true', help='disable pos_embedding in the model structure')
+    # 'SimAM', 'CBAM', 'SE', 'None'
+    parser.add_argument('--att_module', default='SimAM', type=str, help='which att_module to use in the model structure')
+
+    # Environment parameters
+    parser.add_argument('--gpu_idx', default=0, type=int,
+                        help='use a single GPU with its index, -1 to use multiple GPUs')
+
+    # Path parameters
+    parser.add_argument('--dataroot', default=r'/data/pancreatic-cancer-project/k5_dataset',
+                        help='path to dataset')
+    parser.add_argument('--model_path', default=r'/home/pancreatic-cancer-project/saved_models',
+                        help='path to save model state-dict')
+    parser.add_argument('--record_dir', default=r'/home/pancreatic-cancer-project/INF',
+                        help='path to record INF csv')
+
+    # Help tool parameters
+    parser.add_argument('--paint', action='store_false', help='paint in the front desk')  # matplotlib.use('Agg')
+    parser.add_argument('--enable_notify', action='store_true', help='enable notify to send email')
+    # check tool parameters
+    parser.add_argument('--enable_tensorboard', action='store_true', help='enable tensorboard to save status')
+
+    parser.add_argument('--enable_attention_check', action='store_true', help='check and save attention map')
+    parser.add_argument('--enable_visualize_check', action='store_true', help='check and save pics')
+
+    parser.add_argument('--data_augmentation_mode', default=0, type=int, help='data_augmentation_mode')
+
+    # PromptTuning
+    parser.add_argument('--PromptTuning', default=None, type=str,
+                        help='use the Prompt Tuning strategy instead of fine-tuning')
+    # Prompt_Token_num
+    parser.add_argument('--Prompt_Token_num', default=10, type=int, help='Prompt_Token_num')
+
+    # Dataset based parameters
+    parser.add_argument('--num_classes', default=0, type=int, help='classification number, default 0 for auto-fit')
+    parser.add_argument('--edge_size', default=384, type=int, help='edge size of input image')  # 224 256 384 1000
+
+    # Test setting parameters
+    parser.add_argument('--batch_size', default=1, type=int, help='testing batch_size, default 1')
+
+    return parser
+
+
+if __name__ == '__main__':
+    parser = get_args_parser()
+    args = parser.parse_args()
+    main(args)
+
+    # TODO: where should the generated csv be saved?
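An illustrative invocation of the inference script above; every flag comes from get_args_parser(), while the paths and model index are placeholders:

python deployment_dataset_INF.py --model_idx Hybrid2_384_401_testsample --dataroot /data/patches --model_path /home/saved_models --record_dir /home/INF --edge_size 384 --batch_size 4 --gpu_idx 0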
diff --git a/PuzzleTuning/dataprocessing/non-instance.py b/PuzzleTuning/dataprocessing/non-instance.py new file mode 100644 index 0000000000000000000000000000000000000000..0dcc0912dd36f1a1a7784b1168b5cf06711fd19c --- /dev/null +++ b/PuzzleTuning/dataprocessing/non-instance.py @@ -0,0 +1,84 @@
+"""
+Process the test set: mask out the instances in each image and fill the masked
+region with the per-channel pixel mean of the original image, generating a new test set
+ver: Feb 21st
+"""
+
+import numpy as np
+import cv2
+import os
+
+
+def find_all_files(root, suffix=None):
+    """
+    Return a list of file paths ending with a specific suffix
+    """
+    res = []
+    if type(suffix) is str or suffix is None:
+        for root, _, files in os.walk(root):
+            for f in files:
+                if suffix is not None and not f.endswith(suffix):
+                    continue
+                res.append(os.path.join(root, f))
+        return res
+
+    else:
+        print('type of suffix is not legal:', type(suffix))
+        return -1
+
+
+if __name__ == '__main__':
+    # only the data path and result path need to be changed; new_test sits at the same level as test
+    # load the test-set images and masks
+    data_path = 'E:/Study/code/datasets/SIPaKMeD_MIL/test/data/'  # path of the MIL dataset
+    result_path = 'E:/Study/code/datasets/SIPaKMeD_MIL/new_test/'
+    if not os.path.exists(result_path):
+        os.makedirs(result_path)
+
+    suffix = '.jpg'
+    # collect the class names and build the label / class-name dictionary
+    class_names = [filename for filename in os.listdir(data_path)
+                   if os.path.isdir(os.path.join(data_path, filename))]
+    class_names.sort()
+    cls_idxs = [i for i in range(len(class_names))]
+    class_id_dict = dict(zip(class_names, cls_idxs))
+    input_ids = sorted(find_all_files(data_path, suffix=suffix))
+
+    # create the result directories
+    for class_name in class_names:
+        res_path = result_path + class_name
+        if not os.path.exists(res_path):
+            os.makedirs(res_path)
+
+    for i in range(len(input_ids)):
+        image_path = input_ids[i]
+        # read the image and its mask
+        # cv2 gives 0-255 HWC; in a ToTensor step it would become CHW (PIL gives 0-1 HWC)
+        image = np.array(cv2.imread(image_path))
+
+        # mask_path replaces the last 'data' with 'mask'
+        mask_path = "data".join(image_path.split("data")[:-1]) + 'mask' + "".join(image_path.split("data")[-1:])
+        # mask: 0/255 cv2 hwc
+        mask = np.array(cv2.imread(mask_path))
+        mask_norm = np.where(mask > 50, 0, 1)
+        # new_image_path replaces the last 'test/data' with 'new_test'
+        new_image_path = "data".join(image_path.split("test/data")[:-1]) + \
+                         'new_test' + "".join(image_path.split("test/data")[-1:])
+        new_image = image * mask_norm
+
+        # fill the removed region with the per-channel pixel mean of the original image
+        # (note: cv2 arrays are BGR, so channel 0 is actually blue; the fill is still per-channel)
+        value_mean_r = int(np.mean(image[:, :, 0]))
+        value_mean_g = int(np.mean(image[:, :, 1]))
+        value_mean_b = int(np.mean(image[:, :, 2]))
+        new_image[:, :, 0][new_image[:, :, 0] == 0] = value_mean_r
+        new_image[:, :, 1][new_image[:, :, 1] == 0] = value_mean_g
+        new_image[:, :, 2][new_image[:, :, 2] == 0] = value_mean_b
+        new_image = new_image.astype(np.uint8)
+
+        # # show the original, the mask and new_image side by side
+        # images = np.hstack([image, mask, new_image])
+        # cv2.imshow('Before and after mask', images)
+        # cv2.waitKey(0)
+
+        # save the new image
+        cv2.imwrite(new_image_path, new_image)
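A self-contained sketch of the mask-and-fill operation the script above performs (toy arrays; the real script reads image/mask pairs from disk):

import numpy as np

image = np.random.randint(0, 256, (8, 8, 3)).astype(np.uint8)
mask = np.zeros((8, 8, 3), dtype=np.uint8)
mask[2:6, 2:6, :] = 255                    # white marks the instance region
keep = np.where(mask > 50, 0, 1)           # 0 inside the instance, 1 outside
filled = image * keep
for c in range(3):                         # per-channel mean fill, as above
    filled[:, :, c][filled[:, :, c] == 0] = int(image[:, :, c].mean())
filled = filled.astype(np.uint8)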
diff --git a/PuzzleTuning/dataprocessing/resize_and_crop.py b/PuzzleTuning/dataprocessing/resize_and_crop.py new file mode 100644 index 0000000000000000000000000000000000000000..27b5970c6b17b5c30fa82e68ef06cb72db710da4 --- /dev/null +++ b/PuzzleTuning/dataprocessing/resize_and_crop.py @@ -0,0 +1,32 @@
+from PIL import Image
+import os
+
+
+def resize_and_crop(source_folder, target_folder, width, height, endswith='.jpg'):
+    if not os.path.exists(target_folder):
+        os.makedirs(target_folder)
+
+    for filename in os.listdir(source_folder):
+        if filename.endswith(endswith):
+            image_path = os.path.join(source_folder, filename)
+            image = Image.open(image_path)
+
+            # Crop the largest centered square
+            w, h = image.size
+            min_dim = min(w, h)
+            left = (w - min_dim) / 2
+            top = (h - min_dim) / 2
+            right = (w + min_dim) / 2
+            bottom = (h + min_dim) / 2
+            image_cropped = image.crop((left, top, right, bottom))
+
+            # Resize the cropped image
+            image_resized = image_cropped.resize((width, height))
+            target_path = os.path.join(target_folder, filename)
+            image_resized.save(target_path)
+
+
+source_directory = './CAM16'  # Replace this with the path to your folder of original images
+target_directory = './CAM16_new'  # Replace this with the path where you want to save resized images
+
+resize_and_crop(source_directory, target_directory, width=224, height=224, endswith='.jpg')
diff --git a/PuzzleTuning/dataprocessing/self_supervise_dataset_generator.py b/PuzzleTuning/dataprocessing/self_supervise_dataset_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..857f2fb04cb1bd495336c6be76a6f37b5edf9cdb --- /dev/null +++ b/PuzzleTuning/dataprocessing/self_supervise_dataset_generator.py @@ -0,0 +1,125 @@
+"""
+self-supervised dataset making script ver: Aug 21st 21:50
+
+TODO:
+this sketches a simple approach:
+single-process first, to be made multi-process later
+"""
+import torch
+import numpy as np
+import os
+import shutil
+
+from utils.tools import to_2tuple, find_all_files
+
+
+def convert_to_npy(a_data_path):
+
+    # do the format conversion here
+
+    # return the npy array
+    numpy_img = 0
+
+    return numpy_img
+
+
+def cut_to_patch(numpy_img, save_root, resize_infor, patch_size=384):
+    pass
+
+
+def read_and_convert(data_root, save_root, resize_infor, suffix=None, patch_size=384):
+    # process one dataset at a time, since each dataset may need different handling
+
+    # read all the data
+    all_files = find_all_files(data_root)
+
+    # convert all the data into one common format
+    for img in all_files:
+        numpy_img = convert_to_npy(img)
+        cut_to_patch(numpy_img, save_root, resize_infor, patch_size)
+
+    pass
+
+
+class to_patch:
+    """
+    Split an image into patches, each patch with the size of patch_size
+    """
+
+    def __init__(self, patch_size=(16, 16)):
+        patch_size = to_2tuple(patch_size)
+        self.patch_h = patch_size[0]
+        self.patch_w = patch_size[1]
+
+    def __call__(self, x):
+        c, h, w = x.shape
+
+        assert h // self.patch_h == h / self.patch_h and w // self.patch_w == w / self.patch_w
+
+        num_patches = (h // self.patch_h) * (w // self.patch_w)
+
+        # patch encoding
+        # (c, h, w)
+        # -> (c, h // self.patch_h, self.patch_h, w // self.patch_w, self.patch_w)
+        # -> (h // self.patch_h, w // self.patch_w, self.patch_h, self.patch_w, c)
+        # -> (n_patches, patch_size^2*c)
+        patches = x.view(
+            c,
+            h // self.patch_h,
+            self.patch_h,
+            w // self.patch_w,
+            self.patch_w).permute(1, 3, 2, 4, 0).reshape(num_patches, -1)  # it can also be used in Transformer encoding
+
+        # patch split
+        # (n_patches, patch_size^2*c)
+        # -> (num_patches, self.patch_h, self.patch_w, c)
+        # -> (num_patches, c, self.patch_h, self.patch_w)
+        patches = patches.view(num_patches,
+                               self.patch_h,
+                               self.patch_w,
+                               c).permute(0, 3, 1, 2)
+
+        '''
+        # check
+        for i in range(len(patches)):
+            recons_img = ToPILImage()(patches[i])
+            recons_img.save(os.path.join('./patch_play', 'recons_target' + str(i) + '.jpg'))
+
+        # patch compose to image
+        # (num_patches, c, self.patch_h, self.patch_w)
+        # -> (h // self.patch_h, w // self.patch_w, c, self.patch_h, self.patch_w)
+        # -> (c, h // self.patch_h, self.patch_h, w // self.patch_w, self.patch_w)
+        # -> (c, h, w)
+        patches = patches.view(h // self.patch_h,
+                               w // self.patch_w,
+                               c,
+                               self.patch_h,
+                               self.patch_w).permute(2, 0, 3, 1, 4).reshape(c, h, w)
+        '''
+
+        '''
+        # visual check
+        # reshape
+        composed_patches = patches.view(h // self.patch_h,
+                                        w // self.patch_w,
+                                        c,
+                                        self.patch_h,
+                                        self.patch_w).permute(2, 0, 3, 1, 4).reshape(c, h, w)
+        # view pic
+        from torchvision.transforms import ToPILImage
+        composed_img = ToPILImage()(composed_patches)  # transform the tensor image to a PIL image
+        composed_img.save(os.path.join('./', 'composed_img.jpg'))
+        '''
+
+        return patches
+
+
+if __name__ == '__main__':
+    # smoke test: this to_patch expects a torch tensor shaped (C, H, W)
+    img = torch.ones(3, 224, 224)
+
+    patchfy = to_patch(patch_size=(16, 16))
+
+    patch = patchfy(img)
+    print(patch.shape)  # torch.Size([196, 3, 16, 16])
diff --git a/PuzzleTuning/pytorch_grad_cam/README.md b/PuzzleTuning/pytorch_grad_cam/README.md new file mode 100644 index 0000000000000000000000000000000000000000..3d0b6e60f1c9888f3f6fbe2d0faf8e43f115a0da --- /dev/null +++ b/PuzzleTuning/pytorch_grad_cam/README.md @@ -0,0 +1,27 @@
+from https://github.com/jacobgil/pytorch-grad-cam
+
+# References
+
+https://arxiv.org/abs/1610.02391
+Grad-CAM: Visual Explanations from Deep Networks via Gradient-based Localization Ramprasaath R. Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, Dhruv Batra
+
+https://arxiv.org/abs/1710.11063
+Grad-CAM++: Improved Visual Explanations for Deep Convolutional Networks Aditya Chattopadhyay, Anirban Sarkar, Prantik Howlader, Vineeth N Balasubramanian
+
+https://arxiv.org/abs/1910.01279
+Score-CAM: Score-Weighted Visual Explanations for Convolutional Neural Networks Haofan Wang, Zifan Wang, Mengnan Du, Fan Yang, Zijian Zhang, Sirui Ding, Piotr Mardziel, Xia Hu
+
+https://ieeexplore.ieee.org/abstract/document/9093360/
+Ablation-cam: Visual explanations for deep convolutional network via gradient-free localization. Saurabh Desai and Harish G Ramaswamy.
In WACV, pages 972–980, 2020 + +https://arxiv.org/abs/2008.02312 +Axiom-based Grad-CAM: Towards Accurate Visualization and Explanation of CNNs Ruigang Fu, Qingyong Hu, Xiaohu Dong, Yulan Guo, Yinghui Gao, Biao Li + +https://arxiv.org/abs/2008.00299 +Eigen-CAM: Class Activation Map using Principal Components Mohammed Bany Muhammad, Mohammed Yeasin + +http://mftp.mmcheng.net/Papers/21TIP_LayerCAM.pdf +LayerCAM: Exploring Hierarchical Class Activation Maps for Localization Peng-Tao Jiang; Chang-Bin Zhang; Qibin Hou; Ming-Ming Cheng; Yunchao Wei + +https://arxiv.org/abs/1905.00780 +Full-Gradient Representation for Neural Network Visualization Suraj Srinivas, Francois Fleuret diff --git a/PuzzleTuning/pytorch_grad_cam/__init__.py b/PuzzleTuning/pytorch_grad_cam/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a6d7c10790dda00580069776f65a74f3bc8d0768 --- /dev/null +++ b/PuzzleTuning/pytorch_grad_cam/__init__.py @@ -0,0 +1,10 @@ +from pytorch_grad_cam.grad_cam import GradCAM +from pytorch_grad_cam.ablation_cam import AblationCAM +from pytorch_grad_cam.xgrad_cam import XGradCAM +from pytorch_grad_cam.grad_cam_plusplus import GradCAMPlusPlus +from pytorch_grad_cam.score_cam import ScoreCAM +from pytorch_grad_cam.layer_cam import LayerCAM +from pytorch_grad_cam.eigen_cam import EigenCAM +from pytorch_grad_cam.eigen_grad_cam import EigenGradCAM +from pytorch_grad_cam.fullgrad_cam import FullGrad +from pytorch_grad_cam.guided_backprop import GuidedBackpropReLUModel diff --git a/PuzzleTuning/pytorch_grad_cam/ablation_cam.py b/PuzzleTuning/pytorch_grad_cam/ablation_cam.py new file mode 100644 index 0000000000000000000000000000000000000000..7bb682e6e44d8a9f59d5f28e5b1461a67836eb63 --- /dev/null +++ b/PuzzleTuning/pytorch_grad_cam/ablation_cam.py @@ -0,0 +1,105 @@ +import numpy as np +import torch +import tqdm +from pytorch_grad_cam.base_cam import BaseCAM +from pytorch_grad_cam.utils.find_layers import replace_layer_recursive + + +class AblationLayer(torch.nn.Module): + def __init__(self, layer, reshape_transform, indices): + super(AblationLayer, self).__init__() + + self.layer = layer + self.reshape_transform = reshape_transform + # The channels to zero out: + self.indices = indices + + def forward(self, x): + self.__call__(x) + + def __call__(self, x): + output = self.layer(x) + + # Hack to work with ViT, + # Since the activation channels are last and not first like in CNNs + # Probably should remove it? + if self.reshape_transform is not None: + output = output.transpose(1, 2) + + for i in range(output.size(0)): + + # Commonly the minimum activation will be 0, + # And then it makes sense to zero it out. + # However depending on the architecture, + # If the values can be negative, we use very negative values + # to perform the ablation, deviating from the paper. 
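+            # Illustrative note: if the activations ranged over e.g. [-2.3, 5.1],
+            # zeroing a channel would not silence it, so ablated channels are
+            # pushed to min(output) - 1e5, far below any real activation.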
+ if torch.min(output) == 0: + output[i, self.indices[i], :] = 0 + else: + ABLATION_VALUE = 1e5 + output[i, self.indices[i], :] = torch.min( + output) - ABLATION_VALUE + + if self.reshape_transform is not None: + output = output.transpose(2, 1) + + return output + + +class AblationCAM(BaseCAM): + def __init__(self, + model, + target_layers, + use_cuda=False, + reshape_transform=None): + super(AblationCAM, self).__init__(model, target_layers, use_cuda, + reshape_transform) + + def get_cam_weights(self, + input_tensor, + target_layer, + target_category, + activations, + grads): + with torch.no_grad(): + outputs = self.model(input_tensor).cpu().numpy() + original_scores = [] + for i in range(input_tensor.size(0)): + original_scores.append(outputs[i, target_category[i]]) + original_scores = np.float32(original_scores) + + ablation_layer = AblationLayer(target_layer, + self.reshape_transform, + indices=[]) + replace_layer_recursive(self.model, target_layer, ablation_layer) + + if hasattr(self, "batch_size"): + BATCH_SIZE = self.batch_size + else: + BATCH_SIZE = 32 + + number_of_channels = activations.shape[1] + weights = [] + + with torch.no_grad(): + # Iterate over the input batch + for tensor, category in zip(input_tensor, target_category): + batch_tensor = tensor.repeat(BATCH_SIZE, 1, 1, 1) + for i in tqdm.tqdm(range(0, number_of_channels, BATCH_SIZE)): + ablation_layer.indices = list(range(i, i + BATCH_SIZE)) + + if i + BATCH_SIZE > number_of_channels: + keep = number_of_channels - i + batch_tensor = batch_tensor[:keep] + ablation_layer.indices = ablation_layer.indices[:keep] + score = self.model(batch_tensor)[:, category].cpu().numpy() + weights.extend(score) + + weights = np.float32(weights) + weights = weights.reshape(activations.shape[:2]) + original_scores = original_scores[:, None] + weights = (original_scores - weights) / original_scores + + # Replace the model back to the original state + replace_layer_recursive(self.model, ablation_layer, target_layer) + return weights diff --git a/PuzzleTuning/pytorch_grad_cam/activations_and_gradients.py b/PuzzleTuning/pytorch_grad_cam/activations_and_gradients.py new file mode 100644 index 0000000000000000000000000000000000000000..e311c594aff00adc5c7489aeb476cc2544a5075c --- /dev/null +++ b/PuzzleTuning/pytorch_grad_cam/activations_and_gradients.py @@ -0,0 +1,45 @@ +class ActivationsAndGradients: + """ Class for extracting activations and + registering gradients from targetted intermediate layers """ + + def __init__(self, model, target_layers, reshape_transform): + self.model = model + self.gradients = [] + self.activations = [] + self.reshape_transform = reshape_transform + self.handles = [] + for target_layer in target_layers: + self.handles.append( + target_layer.register_forward_hook( + self.save_activation)) + # Backward compitability with older pytorch versions: + if hasattr(target_layer, 'register_full_backward_hook'): + self.handles.append( + target_layer.register_full_backward_hook( + self.save_gradient)) + else: + self.handles.append( + target_layer.register_backward_hook( + self.save_gradient)) + + def save_activation(self, module, input, output): + activation = output + if self.reshape_transform is not None: + activation = self.reshape_transform(activation) + self.activations.append(activation.cpu().detach()) + + def save_gradient(self, module, grad_input, grad_output): + # Gradients are computed in reverse order + grad = grad_output[0] + if self.reshape_transform is not None: + grad = self.reshape_transform(grad) + self.gradients = 
[grad.cpu().detach()] + self.gradients + + def __call__(self, x): + self.gradients = [] + self.activations = [] + return self.model(x) + + def release(self): + for handle in self.handles: + handle.remove() diff --git a/PuzzleTuning/pytorch_grad_cam/base_cam.py b/PuzzleTuning/pytorch_grad_cam/base_cam.py new file mode 100644 index 0000000000000000000000000000000000000000..b3a1dbc6dedd0b1e06afd9c58aaba2d72613f00b --- /dev/null +++ b/PuzzleTuning/pytorch_grad_cam/base_cam.py @@ -0,0 +1,202 @@ +import cv2 +import numpy as np +import torch +import ttach as tta +from pytorch_grad_cam.activations_and_gradients import ActivationsAndGradients +from pytorch_grad_cam.utils.svd_on_activations import get_2d_projection + + +class BaseCAM: + def __init__(self, + model, + target_layers, + use_cuda=False, + reshape_transform=None, + compute_input_gradient=False, + uses_gradients=True): + self.model = model.eval() + self.target_layers = target_layers + self.cuda = use_cuda + if self.cuda: + self.model = model.cuda() + self.reshape_transform = reshape_transform + self.compute_input_gradient = compute_input_gradient + self.uses_gradients = uses_gradients + self.activations_and_grads = ActivationsAndGradients( + self.model, target_layers, reshape_transform) + + """ Get a vector of weights for every channel in the target layer. + Methods that return weights channels, + will typically need to only implement this function. """ + + def get_cam_weights(self, + input_tensor, + target_layers, + target_category, + activations, + grads): + raise Exception("Not Implemented") + + def get_loss(self, output, target_category): + loss = 0 + for i in range(len(target_category)): + loss = loss + output[i, target_category[i]] + return loss + + def get_cam_image(self, + input_tensor, + target_layer, + target_category, + activations, + grads, + eigen_smooth=False): + weights = self.get_cam_weights(input_tensor, target_layer, + target_category, activations, grads) + weighted_activations = weights[:, :, None, None] * activations + if eigen_smooth: + cam = get_2d_projection(weighted_activations) + else: + cam = weighted_activations.sum(axis=1) + return cam + + def forward(self, input_tensor, target_category=None, eigen_smooth=False): + if self.cuda: + input_tensor = input_tensor.cuda() + + if self.compute_input_gradient: + input_tensor = torch.autograd.Variable(input_tensor, + requires_grad=True) + + output = self.activations_and_grads(input_tensor) + if isinstance(target_category, int): + target_category = [target_category] * input_tensor.size(0) + + if target_category is None: + target_category = np.argmax(output.cpu().data.numpy(), axis=-1) + else: + assert(len(target_category) == input_tensor.size(0)) + + if self.uses_gradients: + self.model.zero_grad() + loss = self.get_loss(output, target_category) + loss.backward(retain_graph=True) + + # In most of the saliency attribution papers, the saliency is + # computed with a single target layer. + # Commonly it is the last convolutional layer. + # Here we support passing a list with multiple target layers. + # It will compute the saliency image for every image, + # and then aggregate them (with a default mean aggregation). + # This gives you more flexibility in case you just want to + # use all conv layers for example, all Batchnorm layers, + # or something else. 
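+        # A worked shape example (illustrative): with a batch of 2, a 224 x 224
+        # input and 3 target layers, compute_cam_per_layer returns 3 arrays of
+        # shape (2, 1, 224, 224); aggregate_multi_layers concatenates them to
+        # (2, 3, 224, 224), clamps negatives to 0 and means over axis 1,
+        # yielding one (2, 224, 224) saliency map per input image.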
+ cam_per_layer = self.compute_cam_per_layer(input_tensor, + target_category, + eigen_smooth) + return self.aggregate_multi_layers(cam_per_layer) + + def get_target_width_height(self, input_tensor): + width, height = input_tensor.size(-1), input_tensor.size(-2) + return width, height + + def compute_cam_per_layer( + self, + input_tensor, + target_category, + eigen_smooth): + activations_list = [a.cpu().data.numpy() + for a in self.activations_and_grads.activations] + grads_list = [g.cpu().data.numpy() + for g in self.activations_and_grads.gradients] + target_size = self.get_target_width_height(input_tensor) + + cam_per_target_layer = [] + # Loop over the saliency image from every layer + + for target_layer, layer_activations, layer_grads in \ + zip(self.target_layers, activations_list, grads_list): + cam = self.get_cam_image(input_tensor, + target_layer, + target_category, + layer_activations, + layer_grads, + eigen_smooth) + scaled = self.scale_cam_image(cam, target_size) + cam_per_target_layer.append(scaled[:, None, :]) + + return cam_per_target_layer + + def aggregate_multi_layers(self, cam_per_target_layer): + cam_per_target_layer = np.concatenate(cam_per_target_layer, axis=1) + cam_per_target_layer = np.maximum(cam_per_target_layer, 0) + result = np.mean(cam_per_target_layer, axis=1) + return self.scale_cam_image(result) + + def scale_cam_image(self, cam, target_size=None): + result = [] + for img in cam: + img = img - np.min(img) + img = img / (1e-7 + np.max(img)) + if target_size is not None: + img = cv2.resize(img, target_size) + result.append(img) + result = np.float32(result) + + return result + + def forward_augmentation_smoothing(self, + input_tensor, + target_category=None, + eigen_smooth=False): + transforms = tta.Compose( + [ + tta.HorizontalFlip(), + tta.Multiply(factors=[0.9, 1, 1.1]), + ] + ) + cams = [] + for transform in transforms: + augmented_tensor = transform.augment_image(input_tensor) + cam = self.forward(augmented_tensor, + target_category, eigen_smooth) + + # The ttach library expects a tensor of size BxCxHxW + cam = cam[:, None, :, :] + cam = torch.from_numpy(cam) + cam = transform.deaugment_mask(cam) + + # Back to numpy float32, HxW + cam = cam.numpy() + cam = cam[:, 0, :, :] + cams.append(cam) + + cam = np.mean(np.float32(cams), axis=0) + return cam + + def __call__(self, + input_tensor, + target_category=None, + aug_smooth=False, + eigen_smooth=False): + + # Smooth the CAM result with test time augmentation + if aug_smooth is True: + return self.forward_augmentation_smoothing( + input_tensor, target_category, eigen_smooth) + + return self.forward(input_tensor, + target_category, eigen_smooth) + + def __del__(self): + self.activations_and_grads.release() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.activations_and_grads.release() + if isinstance(exc_value, IndexError): + # Handle IndexError here... + print( + f"An exception occurred in CAM with block: {exc_type}. 
Message: {exc_value}") + return True diff --git a/PuzzleTuning/pytorch_grad_cam/eigen_cam.py b/PuzzleTuning/pytorch_grad_cam/eigen_cam.py new file mode 100644 index 0000000000000000000000000000000000000000..89563748d14672ff026d21f134c2d234659523b5 --- /dev/null +++ b/PuzzleTuning/pytorch_grad_cam/eigen_cam.py @@ -0,0 +1,20 @@ +from pytorch_grad_cam.base_cam import BaseCAM +from pytorch_grad_cam.utils.svd_on_activations import get_2d_projection + +# https://arxiv.org/abs/2008.00299 + + +class EigenCAM(BaseCAM): + def __init__(self, model, target_layers, use_cuda=False, + reshape_transform=None): + super(EigenCAM, self).__init__(model, target_layers, use_cuda, + reshape_transform) + + def get_cam_image(self, + input_tensor, + target_layer, + target_category, + activations, + grads, + eigen_smooth): + return get_2d_projection(activations) diff --git a/PuzzleTuning/pytorch_grad_cam/eigen_grad_cam.py b/PuzzleTuning/pytorch_grad_cam/eigen_grad_cam.py new file mode 100644 index 0000000000000000000000000000000000000000..3932a96d27b6019ed0f537688f0beb47d3c57e11 --- /dev/null +++ b/PuzzleTuning/pytorch_grad_cam/eigen_grad_cam.py @@ -0,0 +1,21 @@ +from pytorch_grad_cam.base_cam import BaseCAM +from pytorch_grad_cam.utils.svd_on_activations import get_2d_projection + +# Like Eigen CAM: https://arxiv.org/abs/2008.00299 +# But multiply the activations x gradients + + +class EigenGradCAM(BaseCAM): + def __init__(self, model, target_layers, use_cuda=False, + reshape_transform=None): + super(EigenGradCAM, self).__init__(model, target_layers, use_cuda, + reshape_transform) + + def get_cam_image(self, + input_tensor, + target_layer, + target_category, + activations, + grads, + eigen_smooth): + return get_2d_projection(grads * activations) diff --git a/PuzzleTuning/pytorch_grad_cam/fullgrad_cam.py b/PuzzleTuning/pytorch_grad_cam/fullgrad_cam.py new file mode 100644 index 0000000000000000000000000000000000000000..3cf4bf394beb1f0f5780831a78da1deca738f1ba --- /dev/null +++ b/PuzzleTuning/pytorch_grad_cam/fullgrad_cam.py @@ -0,0 +1,106 @@ +import numpy as np +import torch +from pytorch_grad_cam.base_cam import BaseCAM +from pytorch_grad_cam.utils.find_layers import find_layer_predicate_recursive +from pytorch_grad_cam.utils.svd_on_activations import get_2d_projection + +# https://arxiv.org/abs/1905.00780 + + +class FullGrad(BaseCAM): + def __init__(self, model, target_layers, use_cuda=False, + reshape_transform=None): + if len(target_layers) > 0: + print( + "Warning: target_layers is ignored in FullGrad. 
All bias layers will be used instead")
+
+        def layer_with_2D_bias(layer):
+            bias_target_layers = [torch.nn.Conv2d, torch.nn.BatchNorm2d]
+            if type(layer) in bias_target_layers and layer.bias is not None:
+                return True
+            return False
+        target_layers = find_layer_predicate_recursive(
+            model, layer_with_2D_bias)
+        super(
+            FullGrad,
+            self).__init__(
+            model,
+            target_layers,
+            use_cuda,
+            reshape_transform,
+            compute_input_gradient=True)
+        self.bias_data = [self.get_bias_data(
+            layer).cpu().numpy() for layer in target_layers]
+
+    def get_bias_data(self, layer):
+        # Borrowed from the official paper impl:
+        # https://github.com/idiap/fullgrad-saliency/blob/master/saliency/tensor_extractor.py#L47
+        if isinstance(layer, torch.nn.BatchNorm2d):
+            bias = - (layer.running_mean * layer.weight
+                      / torch.sqrt(layer.running_var + layer.eps)) + layer.bias
+            return bias.data
+        else:
+            return layer.bias.data
+
+    def scale_accross_batch_and_channels(self, tensor, target_size):
+        batch_size, channel_size = tensor.shape[:2]
+        reshaped_tensor = tensor.reshape(
+            batch_size * channel_size, *tensor.shape[2:])
+        result = self.scale_cam_image(reshaped_tensor, target_size)
+        result = result.reshape(
+            batch_size,
+            channel_size,
+            target_size[1],
+            target_size[0])
+        return result
+
+    def compute_cam_per_layer(
+            self,
+            input_tensor,
+            target_category,
+            eigen_smooth):
+        input_grad = input_tensor.grad.data.cpu().numpy()
+        grads_list = [g.cpu().data.numpy() for g in
+                      self.activations_and_grads.gradients]
+        cam_per_target_layer = []
+        target_size = self.get_target_width_height(input_tensor)
+
+        gradient_multiplied_input = input_grad * input_tensor.data.cpu().numpy()
+        gradient_multiplied_input = np.abs(gradient_multiplied_input)
+        gradient_multiplied_input = self.scale_accross_batch_and_channels(
+            gradient_multiplied_input,
+            target_size)
+        cam_per_target_layer.append(gradient_multiplied_input)
+
+        # Loop over the saliency image from every layer
+        assert(len(self.bias_data) == len(grads_list))
+        for bias, grads in zip(self.bias_data, grads_list):
+            bias = bias[None, :, None, None]
+            # In the paper they take the absolute value,
+            # but possibly taking only the positive gradients will work
+            # better.
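+            # Each bias term yields one |bias * gradient| map below; the maps are
+            # rescaled to the input size and later fused with the input-gradient map.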
+ bias_grad = np.abs(bias * grads) + result = self.scale_accross_batch_and_channels( + bias_grad, target_size) + result = np.sum(result, axis=1) + cam_per_target_layer.append(result[:, None, :]) + cam_per_target_layer = np.concatenate(cam_per_target_layer, axis=1) + if eigen_smooth: + # Resize to a smaller image, since this method typically has a very large number of channels, + # and then consumes a lot of memory + cam_per_target_layer = self.scale_accross_batch_and_channels( + cam_per_target_layer, (target_size[0] // 8, target_size[1] // 8)) + cam_per_target_layer = get_2d_projection(cam_per_target_layer) + cam_per_target_layer = cam_per_target_layer[:, None, :, :] + cam_per_target_layer = self.scale_accross_batch_and_channels( + cam_per_target_layer, + target_size) + else: + cam_per_target_layer = np.sum( + cam_per_target_layer, axis=1)[:, None, :] + + return cam_per_target_layer + + def aggregate_multi_layers(self, cam_per_target_layer): + result = np.sum(cam_per_target_layer, axis=1) + return self.scale_cam_image(result) diff --git a/PuzzleTuning/pytorch_grad_cam/grad_cam.py b/PuzzleTuning/pytorch_grad_cam/grad_cam.py new file mode 100644 index 0000000000000000000000000000000000000000..025bf45ddc57ce3105945d7f4a747d001618a428 --- /dev/null +++ b/PuzzleTuning/pytorch_grad_cam/grad_cam.py @@ -0,0 +1,22 @@ +import numpy as np +from pytorch_grad_cam.base_cam import BaseCAM + + +class GradCAM(BaseCAM): + def __init__(self, model, target_layers, use_cuda=False, + reshape_transform=None): + super( + GradCAM, + self).__init__( + model, + target_layers, + use_cuda, + reshape_transform) + + def get_cam_weights(self, + input_tensor, + target_layer, + target_category, + activations, + grads): + return np.mean(grads, axis=(2, 3)) diff --git a/PuzzleTuning/pytorch_grad_cam/grad_cam_plusplus.py b/PuzzleTuning/pytorch_grad_cam/grad_cam_plusplus.py new file mode 100644 index 0000000000000000000000000000000000000000..4466826b7dd8707063885a1742332492213b03dd --- /dev/null +++ b/PuzzleTuning/pytorch_grad_cam/grad_cam_plusplus.py @@ -0,0 +1,32 @@ +import numpy as np +from pytorch_grad_cam.base_cam import BaseCAM + +# https://arxiv.org/abs/1710.11063 + + +class GradCAMPlusPlus(BaseCAM): + def __init__(self, model, target_layers, use_cuda=False, + reshape_transform=None): + super(GradCAMPlusPlus, self).__init__(model, target_layers, use_cuda, + reshape_transform) + + def get_cam_weights(self, + input_tensor, + target_layers, + target_category, + activations, + grads): + grads_power_2 = grads**2 + grads_power_3 = grads_power_2 * grads + # Equation 19 in https://arxiv.org/abs/1710.11063 + sum_activations = np.sum(activations, axis=(2, 3)) + eps = 0.000001 + aij = grads_power_2 / (2 * grads_power_2 + + sum_activations[:, :, None, None] * grads_power_3 + eps) + # Now bring back the ReLU from eq.7 in the paper, + # And zero out aijs where the activations are 0 + aij = np.where(grads != 0, aij, 0) + + weights = np.maximum(grads, 0) * aij + weights = np.sum(weights, axis=(2, 3)) + return weights diff --git a/PuzzleTuning/pytorch_grad_cam/guided_backprop.py b/PuzzleTuning/pytorch_grad_cam/guided_backprop.py new file mode 100644 index 0000000000000000000000000000000000000000..602fbf354397bf8596f700e8dce94dd0b7f49011 --- /dev/null +++ b/PuzzleTuning/pytorch_grad_cam/guided_backprop.py @@ -0,0 +1,100 @@ +import numpy as np +import torch +from torch.autograd import Function +from pytorch_grad_cam.utils.find_layers import replace_all_layer_type_recursive + + +class GuidedBackpropReLU(Function): + @staticmethod + def 
forward(self, input_img): + positive_mask = (input_img > 0).type_as(input_img) + output = torch.addcmul( + torch.zeros( + input_img.size()).type_as(input_img), + input_img, + positive_mask) + self.save_for_backward(input_img, output) + return output + + @staticmethod + def backward(self, grad_output): + input_img, output = self.saved_tensors + grad_input = None + + positive_mask_1 = (input_img > 0).type_as(grad_output) + positive_mask_2 = (grad_output > 0).type_as(grad_output) + grad_input = torch.addcmul( + torch.zeros( + input_img.size()).type_as(input_img), + torch.addcmul( + torch.zeros( + input_img.size()).type_as(input_img), + grad_output, + positive_mask_1), + positive_mask_2) + return grad_input + + +class GuidedBackpropReLUasModule(torch.nn.Module): + def __init__(self): + super(GuidedBackpropReLUasModule, self).__init__() + + def forward(self, input_img): + return GuidedBackpropReLU.apply(input_img) + + +class GuidedBackpropReLUModel: + def __init__(self, model, use_cuda): + self.model = model + self.model.eval() + self.cuda = use_cuda + if self.cuda: + self.model = self.model.cuda() + + def forward(self, input_img): + return self.model(input_img) + + def recursive_replace_relu_with_guidedrelu(self, module_top): + + for idx, module in module_top._modules.items(): + self.recursive_replace_relu_with_guidedrelu(module) + if module.__class__.__name__ == 'ReLU': + module_top._modules[idx] = GuidedBackpropReLU.apply + print("b") + + def recursive_replace_guidedrelu_with_relu(self, module_top): + try: + for idx, module in module_top._modules.items(): + self.recursive_replace_guidedrelu_with_relu(module) + if module == GuidedBackpropReLU.apply: + module_top._modules[idx] = torch.nn.ReLU() + except BaseException: + pass + + def __call__(self, input_img, target_category=None): + replace_all_layer_type_recursive(self.model, + torch.nn.ReLU, + GuidedBackpropReLUasModule()) + + if self.cuda: + input_img = input_img.cuda() + + input_img = input_img.requires_grad_(True) + + output = self.forward(input_img) + + if target_category is None: + target_category = np.argmax(output.cpu().data.numpy()) + + loss = output[0, target_category] + loss.backward(retain_graph=True) + + output = input_img.grad.cpu().data.numpy() + output = output[0, :, :, :] + output = output.transpose((1, 2, 0)) + + replace_all_layer_type_recursive(self.model, + GuidedBackpropReLUasModule, + torch.nn.ReLU()) + + return output diff --git a/PuzzleTuning/pytorch_grad_cam/layer_cam.py b/PuzzleTuning/pytorch_grad_cam/layer_cam.py new file mode 100644 index 0000000000000000000000000000000000000000..971443d798658d6c29ff9da54481511ac317a1b0 --- /dev/null +++ b/PuzzleTuning/pytorch_grad_cam/layer_cam.py @@ -0,0 +1,36 @@ +import numpy as np +from pytorch_grad_cam.base_cam import BaseCAM +from pytorch_grad_cam.utils.svd_on_activations import get_2d_projection + +# https://ieeexplore.ieee.org/document/9462463 + + +class LayerCAM(BaseCAM): + def __init__( + self, + model, + target_layers, + use_cuda=False, + reshape_transform=None): + super( + LayerCAM, + self).__init__( + model, + target_layers, + use_cuda, + reshape_transform) + + def get_cam_image(self, + input_tensor, + target_layer, + target_category, + activations, + grads, + eigen_smooth): + spatial_weighted_activations = np.maximum(grads, 0) * activations + + if eigen_smooth: + cam = get_2d_projection(spatial_weighted_activations) + else: + cam = spatial_weighted_activations.sum(axis=1) + return cam diff --git a/PuzzleTuning/pytorch_grad_cam/score_cam.py 
b/PuzzleTuning/pytorch_grad_cam/score_cam.py new file mode 100644 index 0000000000000000000000000000000000000000..9865964d98dc379d6577539ee857bd87d2f33589 --- /dev/null +++ b/PuzzleTuning/pytorch_grad_cam/score_cam.py @@ -0,0 +1,61 @@ +import torch +import tqdm +from pytorch_grad_cam.base_cam import BaseCAM + + +class ScoreCAM(BaseCAM): + def __init__( + self, + model, + target_layers, + use_cuda=False, + reshape_transform=None): + super(ScoreCAM, self).__init__(model, target_layers, use_cuda, + reshape_transform=reshape_transform) + + if len(target_layers) > 0: + print("Warning: You are using ScoreCAM with target layers, " + "however ScoreCAM will ignore them.") + + def get_cam_weights(self, + input_tensor, + target_layer, + target_category, + activations, + grads): + with torch.no_grad(): + upsample = torch.nn.UpsamplingBilinear2d( + size=input_tensor.shape[-2:]) + activation_tensor = torch.from_numpy(activations) + if self.cuda: + activation_tensor = activation_tensor.cuda() + + upsampled = upsample(activation_tensor) + + maxs = upsampled.view(upsampled.size(0), + upsampled.size(1), -1).max(dim=-1)[0] + mins = upsampled.view(upsampled.size(0), + upsampled.size(1), -1).min(dim=-1)[0] + maxs, mins = maxs[:, :, None, None], mins[:, :, None, None] + upsampled = (upsampled - mins) / (maxs - mins) + + input_tensors = input_tensor[:, None, + :, :] * upsampled[:, :, None, :, :] + + if hasattr(self, "batch_size"): + BATCH_SIZE = self.batch_size + else: + BATCH_SIZE = 16 + + scores = [] + for batch_index, tensor in enumerate(input_tensors): + category = target_category[batch_index] + for i in tqdm.tqdm(range(0, tensor.size(0), BATCH_SIZE)): + batch = tensor[i: i + BATCH_SIZE, :] + outputs = self.model(batch).cpu().numpy()[:, category] + scores.extend(outputs) + scores = torch.Tensor(scores) + scores = scores.view(activations.shape[0], activations.shape[1]) + + weights = torch.nn.Softmax(dim=-1)(scores).numpy() + return weights diff --git a/PuzzleTuning/pytorch_grad_cam/utils/__init__.py b/PuzzleTuning/pytorch_grad_cam/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..be80d48e4c3bfe648fdbfc8d34c72a420e18aca2 --- /dev/null +++ b/PuzzleTuning/pytorch_grad_cam/utils/__init__.py @@ -0,0 +1,2 @@ +from pytorch_grad_cam.utils.image import deprocess_image,show_cam_on_image, preprocess_image +from pytorch_grad_cam.utils.svd_on_activations import get_2d_projection \ No newline at end of file diff --git a/PuzzleTuning/pytorch_grad_cam/utils/find_layers.py b/PuzzleTuning/pytorch_grad_cam/utils/find_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..4b9e44590664fdc30e996f79bd1a3497db40e822 --- /dev/null +++ b/PuzzleTuning/pytorch_grad_cam/utils/find_layers.py @@ -0,0 +1,30 @@ +def replace_layer_recursive(model, old_layer, new_layer): + for name, layer in model._modules.items(): + if layer == old_layer: + model._modules[name] = new_layer + return True + elif replace_layer_recursive(layer, old_layer, new_layer): + return True + return False + + +def replace_all_layer_type_recursive(model, old_layer_type, new_layer): + for name, layer in model._modules.items(): + if isinstance(layer, old_layer_type): + model._modules[name] = new_layer + replace_all_layer_type_recursive(layer, old_layer_type, new_layer) + + +def find_layer_types_recursive(model, layer_types): + def predicate(layer): + return type(layer) in layer_types + return find_layer_predicate_recursive(model, predicate) + + +def find_layer_predicate_recursive(model, predicate): + result = [] + 
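# depth-first walk over _modules: collect every layer the predicate accepts, then recurse into its children +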
for name, layer in model._modules.items(): + if predicate(layer): + result.append(layer) + result.extend(find_layer_predicate_recursive(layer, predicate)) + return result diff --git a/PuzzleTuning/pytorch_grad_cam/utils/image.py b/PuzzleTuning/pytorch_grad_cam/utils/image.py new file mode 100644 index 0000000000000000000000000000000000000000..8e91f9beea11b0faf51493be9d9cfb404f8d1f34 --- /dev/null +++ b/PuzzleTuning/pytorch_grad_cam/utils/image.py @@ -0,0 +1,49 @@ +import cv2 +import numpy as np +import torch +from torchvision.transforms import Compose, Normalize, ToTensor + + +def preprocess_image(img: np.ndarray, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) -> torch.Tensor: + preprocessing = Compose([ + ToTensor(), + Normalize(mean=mean, std=std) + ]) + return preprocessing(img.copy()).unsqueeze(0) + + +def deprocess_image(img): + """ see https://github.com/jacobgil/keras-grad-cam/blob/master/grad-cam.py#L65 """ + img = img - np.mean(img) + img = img / (np.std(img) + 1e-5) + img = img * 0.1 + img = img + 0.5 + img = np.clip(img, 0, 1) + return np.uint8(img * 255) + + +def show_cam_on_image(img: np.ndarray, + mask: np.ndarray, + use_rgb: bool = False, + colormap: int = cv2.COLORMAP_JET) -> np.ndarray: + """ This function overlays the cam mask on the image as an heatmap. + By default the heatmap is in BGR format. + + :param img: The base image in RGB or BGR format. + :param mask: The cam mask. + :param use_rgb: Whether to use an RGB or BGR heatmap, this should be set to True if 'img' is in RGB format. + :param colormap: The OpenCV colormap to be used. + :returns: The default image with the cam overlay. + """ + heatmap = cv2.applyColorMap(np.uint8(255 * mask), colormap) + if use_rgb: + heatmap = cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB) + heatmap = np.float32(heatmap) / 255 + + if np.max(img) > 1: + raise Exception( + "The input image should np.float32 in the range [0, 1]") + + cam = heatmap + img + cam = cam / np.max(cam) + return np.uint8(255 * cam) diff --git a/PuzzleTuning/pytorch_grad_cam/utils/svd_on_activations.py b/PuzzleTuning/pytorch_grad_cam/utils/svd_on_activations.py new file mode 100644 index 0000000000000000000000000000000000000000..a406aeea85617922e67270a70388256ac214e8e2 --- /dev/null +++ b/PuzzleTuning/pytorch_grad_cam/utils/svd_on_activations.py @@ -0,0 +1,19 @@ +import numpy as np + + +def get_2d_projection(activation_batch): + # TBD: use pytorch batch svd implementation + activation_batch[np.isnan(activation_batch)] = 0 + projections = [] + for activations in activation_batch: + reshaped_activations = (activations).reshape( + activations.shape[0], -1).transpose() + # Centering before the SVD seems to be important here, + # Otherwise the image returned is negative + reshaped_activations = reshaped_activations - \ + reshaped_activations.mean(axis=0) + U, S, VT = np.linalg.svd(reshaped_activations, full_matrices=True) + projection = reshaped_activations @ VT[0, :] + projection = projection.reshape(activations.shape[1:]) + projections.append(projection) + return np.float32(projections) diff --git a/PuzzleTuning/pytorch_grad_cam/xgrad_cam.py b/PuzzleTuning/pytorch_grad_cam/xgrad_cam.py new file mode 100644 index 0000000000000000000000000000000000000000..81a920fe8b81bfb7bce9f317edfcc465c9bffd60 --- /dev/null +++ b/PuzzleTuning/pytorch_grad_cam/xgrad_cam.py @@ -0,0 +1,31 @@ +import numpy as np +from pytorch_grad_cam.base_cam import BaseCAM + + +class XGradCAM(BaseCAM): + def __init__( + self, + model, + target_layers, + use_cuda=False, + reshape_transform=None): + super( + 
XGradCAM, + self).__init__( + model, + target_layers, + use_cuda, + reshape_transform) + + def get_cam_weights(self, + input_tensor, + target_layer, + target_category, + activations, + grads): + sum_activations = np.sum(activations, axis=(2, 3)) + eps = 1e-7 + weights = grads * activations / \ + (sum_activations[:, :, None, None] + eps) + weights = weights.sum(axis=(2, 3)) + return weights diff --git a/PuzzleTuning/utils/Experiment_script_helper.py b/PuzzleTuning/utils/Experiment_script_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..5e0ad02940f6786ea025b6c5fbb84446ab68a23e --- /dev/null +++ b/PuzzleTuning/utils/Experiment_script_helper.py @@ -0,0 +1,883 @@ +""" +Experimental Script Generator Script ver: Oct 5th 16:30 + +for linux servers + +todo fix train and test alternatively +""" +import argparse +import os.path + + +def zero_trans_mystrlr_to_float(in_str): + # EG: '305' -> 0.0005 + front = '0.' + num_of_zero = int(in_str[0]) + end = in_str[-1] + for i in range(num_of_zero): + front = front + '0' + front = front + end + + out_float = float(front) + + return out_float + + +def zero_trans_floatlr_to_mystrlr(in_float): + # EG: 0.0005 -> '305' + in_string = "%.20f" % in_float + zero_counts = 0 + + for i in range(len(in_string) - 2): + # print(string[i+2]) + if in_string[i + 2] == '0': + zero_counts += 1 + else: + cut = i + break + + trans_output = str(zero_counts) + '0' + in_string[(cut + 2):] + + last_zeros = 0 + for i in trans_output[::-1]: + if i == '0': + last_zeros += 1 + else: + break + trans_output = trans_output[0:0 - last_zeros] + + return trans_output + + +def remove_nohup_ignoring_input_at_first_line(directory='./'): + """ + read the .sh files at the directory, remove the first line if it's 'nohup: ignoring input\n' + """ + for root, _, files in os.walk(directory): + for file_name in files: + if file_name.endswith(".sh"): + file_path = os.path.join(root, file_name) + + with open(file_path, 'r') as file: + lines = file.readlines() + # print(lines) + + modified_lines = [line for line in lines if line != "nohup: ignoring input\n"] + with open(file_path, 'w') as file: + file.writelines(modified_lines) + + print('file_path:', file_path, 'has been cleaned') + + +def concatenate_the_lines_from_several_files(directory='./', cat_file='0.sh'): + cat_file_path = os.path.join(directory, cat_file) + all_lines = ["#!/bin/sh\n", ] + + for root, _, files in os.walk(directory): + for file_name in files: + if file_name.endswith(".sh"): + file_path = os.path.join(root, file_name) + + with open(file_path, 'r') as file: + lines = file.readlines() + # print(lines) + + modified_lines = [line for line in lines if line != "#!/bin/sh\n"] + all_lines.extend(modified_lines) + print('file_path:', file_path, 'has taken') + + with open(cat_file_path, 'w') as file: + file.writelines(all_lines) + + +def print_a_PuzzleTuning_comparison_script(model_weight_idx, model_weight_name, lr, lrf, lr_mystr, lrf_mystr, + dataset_name, dataroot, draw_root, Pre_Trained_model_path_PATH, + save_model_PATH, data_augmentation_mode, GPU_idx, Prompt_Token_num=20, + Prompt_input=False): + Pre_Trained_model_path = os.path.join(Pre_Trained_model_path_PATH, model_weight_name) + VPT_backbone_model_path = os.path.join(Pre_Trained_model_path_PATH, 'ViT_b16_224_Imagenet.pth') + if not Prompt_input: + # send a ViT model inside and then do the ViT + finetuning; + # In VPT versions: we build VPT backbone with the ViT weight, then do finetuning and prompting + # ViT + finetuning + print( + 'python Train.py 
--gpu_idx ' + GPU_idx + ' --edge_size 224 --data_augmentation_mode ' + data_augmentation_mode + + ' --lr ' + lr + ' --lrf ' + lrf + ' --enable_tensorboard --model_idx ViT_base_' + model_weight_idx + '_' + + lr_mystr + '_lf' + lrf_mystr + '_finetuning_' + dataset_name + '_CLS --dataroot ' + str(dataroot) + + ' --draw_root ' + draw_root + ' --Pre_Trained_model_path ' + Pre_Trained_model_path + + ' --model_path ' + save_model_PATH) + print( + 'python Test.py --gpu_idx ' + GPU_idx + ' --edge_size 224 --data_augmentation_mode ' + data_augmentation_mode + + ' --model_idx ViT_base_' + model_weight_idx + '_' + lr_mystr + '_lf' + lrf_mystr + '_finetuning_' + + dataset_name + '_CLS --dataroot ' + str(dataroot) + ' --draw_root ' + draw_root + ' --model_path ' + + save_model_PATH) + # VPT + prompting + print( + 'python Train.py --gpu_idx ' + GPU_idx + ' --edge_size 224 --data_augmentation_mode ' + data_augmentation_mode + + ' --lr ' + lr + ' --lrf ' + lrf + ' --enable_tensorboard --model_idx ViT_base_' + model_weight_idx + + '_PromptDeep_' + str(Prompt_Token_num) + '_' + lr_mystr + '_lf' + lrf_mystr + '_prompting_' + dataset_name + + '_CLS --PromptTuning Deep --Prompt_Token_num ' + str(Prompt_Token_num) + ' --dataroot ' + str( + dataroot) + ' --draw_root ' + draw_root + + ' --Pre_Trained_model_path ' + Pre_Trained_model_path + ' --model_path ' + save_model_PATH) + print( + 'python Test.py --gpu_idx ' + GPU_idx + ' --edge_size 224 --data_augmentation_mode ' + data_augmentation_mode + + ' --model_idx ViT_base_' + model_weight_idx + '_PromptDeep_' + str( + Prompt_Token_num) + '_' + lr_mystr + '_lf' + lrf_mystr + + '_prompting_' + dataset_name + '_CLS --PromptTuning Deep --Prompt_Token_num ' + str( + Prompt_Token_num) + ' --dataroot ' + str(dataroot) + ' --draw_root ' + + draw_root + ' --Pre_Trained_model_path ' + Pre_Trained_model_path + ' --model_path ' + save_model_PATH) + # VPT + finetuning + print( + 'python Train.py --gpu_idx ' + GPU_idx + ' --edge_size 224 --data_augmentation_mode ' + data_augmentation_mode + + ' --lr ' + lr + ' --lrf ' + lrf + ' --enable_tensorboard --model_idx ViT_base_' + model_weight_idx + + '_PromptDeep_' + str( + Prompt_Token_num) + '_' + lr_mystr + '_lf' + lrf_mystr + '_finetuning_' + dataset_name + + '_CLS --PromptTuning Deep --Prompt_Token_num ' + str( + Prompt_Token_num) + ' --PromptUnFreeze --dataroot ' + str(dataroot) + ' --draw_root ' + draw_root + + ' --Pre_Trained_model_path ' + Pre_Trained_model_path + ' --model_path ' + save_model_PATH) + print( + 'python Test.py --gpu_idx ' + GPU_idx + ' --edge_size 224 --data_augmentation_mode ' + data_augmentation_mode + + ' --model_idx ViT_base_' + model_weight_idx + '_PromptDeep_' + str( + Prompt_Token_num) + '_' + lr_mystr + '_lf' + lrf_mystr + + '_finetuning_' + dataset_name + '_CLS --PromptTuning Deep --Prompt_Token_num ' + str( + Prompt_Token_num) + ' --PromptUnFreeze --dataroot ' + str(dataroot) + + ' --draw_root ' + draw_root + ' --model_path ' + save_model_PATH) + else: + # send a VPT prompt state inside to build the prompt tokens + # we build VPT backbone with the ViT-timm weight, then do finetuning and prompting + # fixme notice here Pre_Trained_model_path is actually the trained prompt state path + # VPT + prompting + print( + 'python Train.py --gpu_idx ' + GPU_idx + ' --edge_size 224 --data_augmentation_mode ' + data_augmentation_mode + + ' --lr ' + lr + ' --lrf ' + lrf + ' --enable_tensorboard --model_idx ViT_base_' + model_weight_idx + + '_PromptDeep_' + str(Prompt_Token_num) + '_' + lr_mystr + '_lf' + lrf_mystr + 
'_prompting_' + dataset_name + + '_CLS --PromptTuning Deep --Prompt_Token_num ' + str(Prompt_Token_num) + ' --dataroot ' + str( + dataroot) + ' --draw_root ' + draw_root + + ' --Pre_Trained_model_path ' + VPT_backbone_model_path + ' --Prompt_state_path ' + Pre_Trained_model_path + ' --model_path ' + save_model_PATH) + print( + 'python Test.py --gpu_idx ' + GPU_idx + ' --edge_size 224 --data_augmentation_mode ' + data_augmentation_mode + + ' --model_idx ViT_base_' + model_weight_idx + '_PromptDeep_' + str( + Prompt_Token_num) + '_' + lr_mystr + '_lf' + lrf_mystr + + '_prompting_' + dataset_name + '_CLS --PromptTuning Deep --Prompt_Token_num ' + str( + Prompt_Token_num) + ' --dataroot ' + str(dataroot) + ' --draw_root ' + + draw_root + ' --Pre_Trained_model_path ' + VPT_backbone_model_path + ' --model_path ' + save_model_PATH) + # VPT + finetuning + print( + 'python Train.py --gpu_idx ' + GPU_idx + ' --edge_size 224 --data_augmentation_mode ' + data_augmentation_mode + + ' --lr ' + lr + ' --lrf ' + lrf + ' --enable_tensorboard --model_idx ViT_base_' + model_weight_idx + + '_PromptDeep_' + str( + Prompt_Token_num) + '_' + lr_mystr + '_lf' + lrf_mystr + '_finetuning_' + dataset_name + + '_CLS --PromptTuning Deep --Prompt_Token_num ' + str( + Prompt_Token_num) + ' --PromptUnFreeze --dataroot ' + str(dataroot) + ' --draw_root ' + draw_root + + ' --Pre_Trained_model_path ' + VPT_backbone_model_path + ' --Prompt_state_path ' + Pre_Trained_model_path + ' --model_path ' + save_model_PATH) + print( + 'python Test.py --gpu_idx ' + GPU_idx + ' --edge_size 224 --data_augmentation_mode ' + data_augmentation_mode + + ' --model_idx ViT_base_' + model_weight_idx + '_PromptDeep_' + str( + Prompt_Token_num) + '_' + lr_mystr + '_lf' + lrf_mystr + + '_finetuning_' + dataset_name + '_CLS --PromptTuning Deep --Prompt_Token_num ' + str( + Prompt_Token_num) + ' --PromptUnFreeze --dataroot ' + str(dataroot) + + ' --draw_root ' + draw_root + ' --model_path ' + save_model_PATH) + + print('') + + +def write_PuzzleTuning_comparison_script(lr_mystr, lrf_mystr, data_augmentation_mode, dataset_name, GPU_idx='0'): + """ + In PuzzleTuning comparison experiments we put + datasets at: --dataroot /root/autodl-tmp/datasets + Pre_Trained_model_path /root/autodl-tmp/pre_trained_models # output_models (not applicable for comparison) + Prompt_state_path (not applicable for comparison) /root/autodl-tmp/output_models + save the training model at: model_path /root/autodl-tmp/saved_models + draw_root /root/autodl-tmp/PuzzleTuning_Comparison/[*lr*_*lrf*_*dataset_name*] + + """ + dataroot_PATH = '/root/autodl-tmp/datasets' + Pre_Trained_model_path_PATH = '/root/autodl-tmp/pre_trained_models' + save_model_PATH = '/root/autodl-tmp/saved_models' + draw_root_PATH = '/root/autodl-tmp/PuzzleTuning_Comparison' + + data_augmentation_mode = str(data_augmentation_mode) + GPU_idx = str(GPU_idx) + + lr = str(zero_trans_mystrlr_to_float(lr_mystr)) + lrf = '0.' 
+ str(lrf_mystr) + + experiment_idx = lr_mystr + '_lf' + lrf_mystr + '_' + dataset_name + + dataroot = os.path.join(dataroot_PATH, dataset_name + '_CLS') + draw_root = os.path.join(draw_root_PATH, experiment_idx) + + # PuzzleTuning official version: + # we pre-trained VPT prompt tokens, and use the timm ViT as backbone + print('#SAE-timm-start_promptstate') # SAE+VPT start with timm + model_weight_idx = 'ViT_base_timm_PuzzleTuning_SAE_E_199_promptstate' + model_weight_name = 'ViT_b16_224_timm_PuzzleTuning_SAE_CPIAm_Prompt_Deep_tokennum_20_E_199_promptstate.pth' + print_a_PuzzleTuning_comparison_script(model_weight_idx, model_weight_name, lr, lrf, lr_mystr, lrf_mystr, + dataset_name, dataroot, draw_root, Pre_Trained_model_path_PATH, + save_model_PATH, data_augmentation_mode, GPU_idx, Prompt_input=True) + + # Comparison methods: + + # For the comparison methods: we trained ViT, so we use ViT + ft first, + # and then, put it as vpt 's backbone in prompting and VPT finetuning. + print('#空白对比') + model_weight_idx = 'random' + model_weight_name = 'ViT_b16_224_Random_Init.pth' + print_a_PuzzleTuning_comparison_script(model_weight_idx, model_weight_name, lr, lrf, lr_mystr, lrf_mystr, + dataset_name, dataroot, draw_root, Pre_Trained_model_path_PATH, + save_model_PATH, data_augmentation_mode, GPU_idx, Prompt_input=False) + + print('#timm对比') + model_weight_idx = 'timm' + model_weight_name = 'ViT_b16_224_Imagenet.pth' + print_a_PuzzleTuning_comparison_script(model_weight_idx, model_weight_name, lr, lrf, lr_mystr, lrf_mystr, + dataset_name, dataroot, draw_root, Pre_Trained_model_path_PATH, + save_model_PATH, data_augmentation_mode, GPU_idx, Prompt_input=False) + + print('#MAEImageNet对比') + model_weight_idx = 'MAEImageNet' + model_weight_name = 'ViT_b16_224_MAEImageNet_Init.pth' + print_a_PuzzleTuning_comparison_script(model_weight_idx, model_weight_name, lr, lrf, lr_mystr, lrf_mystr, + dataset_name, dataroot, draw_root, Pre_Trained_model_path_PATH, + save_model_PATH, data_augmentation_mode, GPU_idx, Prompt_input=False) + + print('#mae对比') + model_weight_idx = 'timm_mae_CPIAm_E100' + model_weight_name = 'ViT_b16_224_timm_mae_ALL_100.pth' + print_a_PuzzleTuning_comparison_script(model_weight_idx, model_weight_name, lr, lrf, lr_mystr, lrf_mystr, + dataset_name, dataroot, draw_root, Pre_Trained_model_path_PATH, + save_model_PATH, data_augmentation_mode, GPU_idx, Prompt_input=False) + + print('#moco对比') + model_weight_idx = 'timm_moco_CPIAm_E100' + model_weight_name = 'ViT_b16_224_timm_moco_ALL_100.pth' + print_a_PuzzleTuning_comparison_script(model_weight_idx, model_weight_name, lr, lrf, lr_mystr, lrf_mystr, + dataset_name, dataroot, draw_root, Pre_Trained_model_path_PATH, + save_model_PATH, data_augmentation_mode, GPU_idx, Prompt_input=False) + + print('#dino对比') + model_weight_idx = 'timm_dino_CPIAm_E100' + model_weight_name = 'ViT_b16_224_timm_dino_ALL_100.pth' + print_a_PuzzleTuning_comparison_script(model_weight_idx, model_weight_name, lr, lrf, lr_mystr, lrf_mystr, + dataset_name, dataroot, draw_root, Pre_Trained_model_path_PATH, + save_model_PATH, data_augmentation_mode, GPU_idx, Prompt_input=False) + + print('#BYOL对比') + model_weight_idx = 'timm_BYOL_CPIAm_E50' + model_weight_name = 'ViT_b16_224_timm_BYOL_ALL_50.pth' + print_a_PuzzleTuning_comparison_script(model_weight_idx, model_weight_name, lr, lrf, lr_mystr, lrf_mystr, + dataset_name, dataroot, draw_root, Pre_Trained_model_path_PATH, + save_model_PATH, data_augmentation_mode, GPU_idx, Prompt_input=False) + + print('#GCMAE对比') + model_weight_idx 
= 'timm_GCMAE_CPIAm_E80' + model_weight_name = 'ViT_b16_224_timm_GCMAE_ALL_80.pth' + print_a_PuzzleTuning_comparison_script(model_weight_idx, model_weight_name, lr, lrf, lr_mystr, lrf_mystr, + dataset_name, dataroot, draw_root, Pre_Trained_model_path_PATH, + save_model_PATH, data_augmentation_mode, GPU_idx, Prompt_input=False) + + print('#SDMAE对比') + model_weight_idx = 'timm_SDMAE_CPIAm_E80' + model_weight_name = 'ViT_b16_224_timm_SDMAE_ALL_80.pth' + print_a_PuzzleTuning_comparison_script(model_weight_idx, model_weight_name, lr, lrf, lr_mystr, lrf_mystr, + dataset_name, dataroot, draw_root, Pre_Trained_model_path_PATH, + save_model_PATH, data_augmentation_mode, GPU_idx, Prompt_input=False) + + print('#SIMMIM对比') + model_weight_idx = 'timm_SIMMIM_CPIAm_E200' + model_weight_name = 'ViT_b16_224_timm_SIMMIM_ALL_200.pth' + print_a_PuzzleTuning_comparison_script(model_weight_idx, model_weight_name, lr, lrf, lr_mystr, lrf_mystr, + dataset_name, dataroot, draw_root, Pre_Trained_model_path_PATH, + save_model_PATH, data_augmentation_mode, GPU_idx, Prompt_input=False) + + print('#SIMCLR对比') + model_weight_idx = 'timm_SIMCLR_CPIAm_E100' + model_weight_name = 'ViT_b16_224_timm_SIMCLR_ALL_100.pth' + print_a_PuzzleTuning_comparison_script(model_weight_idx, model_weight_name, lr, lrf, lr_mystr, lrf_mystr, + dataset_name, dataroot, draw_root, Pre_Trained_model_path_PATH, + save_model_PATH, data_augmentation_mode, GPU_idx, Prompt_input=False) + + # Ablation versions: + + # For ablation SAE-ViT version, we pre-trained ViT, so we use ViT + ft first, + # and then, put it as vpt 's backbone in prompting and VPT finetuning. + print('#PuzzleTuning_SAE_ViT-CPIA对比') + model_weight_idx = 'timm_PuzzleTuning_SAE_E_199' + model_weight_name = 'ViT_b16_224_timm_PuzzleTuning_SAE_CPIAm_E_199.pth' + print_a_PuzzleTuning_comparison_script(model_weight_idx, model_weight_name, lr, lrf, lr_mystr, lrf_mystr, + dataset_name, dataroot, draw_root, Pre_Trained_model_path_PATH, + save_model_PATH, data_augmentation_mode, GPU_idx, Prompt_input=False) + + print('#SAE_fixp16fixr25-timm-start') # SAE_fixp16fixr25+ViT start with timm + model_weight_idx = 'ViT_base_timm_PuzzleTuning_SAE_fixp16fixr25_E_199' + model_weight_name = 'ViT_b16_224_timm_PuzzleTuning_SAE_fixp16fixr25_CPIAm_E_199.pth' + print_a_PuzzleTuning_comparison_script(model_weight_idx, model_weight_name, lr, lrf, lr_mystr, lrf_mystr, + dataset_name, dataroot, draw_root, Pre_Trained_model_path_PATH, + save_model_PATH, data_augmentation_mode, GPU_idx, Prompt_input=False) + + print('#SAE_fixp16ratiodecay-timm-start') # SAE_fixp16ratiodecay+ViT start with timm + model_weight_idx = 'ViT_base_timm_PuzzleTuning_SAE_fixp16ratiodecay_E_199' + model_weight_name = 'ViT_b16_224_timm_PuzzleTuning_SAE_fixp16ratiodecay_CPIAm_E_199.pth' + print_a_PuzzleTuning_comparison_script(model_weight_idx, model_weight_name, lr, lrf, lr_mystr, lrf_mystr, + dataset_name, dataroot, draw_root, Pre_Trained_model_path_PATH, + save_model_PATH, data_augmentation_mode, GPU_idx, Prompt_input=False) + + # For ablation SAE-VPT version, we pre-trained VPT prompt tokens, and use the timm ViT as backbone + print('#MAE-VPT_promptstate') # MAE+VPT + model_weight_idx = 'timm_mae_Prompt_CPIAm_E199_promptstate' + model_weight_name = 'ViT_b16_224_timm_PuzzleTuning_MAE_CPIAm_Prompt_Deep_tokennum_20_E_199_promptstate.pth' + print_a_PuzzleTuning_comparison_script(model_weight_idx, model_weight_name, lr, lrf, lr_mystr, lrf_mystr, + dataset_name, dataroot, draw_root, Pre_Trained_model_path_PATH, + save_model_PATH, 
data_augmentation_mode, GPU_idx, Prompt_input=True) + + print('#SAE-MAE-start_promptstate') # SAE+VPT start with MAEImageNet + model_weight_idx = 'ViT_base_MAEImageNet_PuzzleTuning_SAE_E_199_promptstate' + model_weight_name = 'ViT_b16_224_MAEImageNet_PuzzleTuning_SAE_CPIAm_Prompt_Deep_tokennum_20_E_199_promptstate.pth' + print_a_PuzzleTuning_comparison_script(model_weight_idx, model_weight_name, lr, lrf, lr_mystr, lrf_mystr, + dataset_name, dataroot, draw_root, Pre_Trained_model_path_PATH, + save_model_PATH, data_augmentation_mode, GPU_idx, Prompt_input=True) + + print('#SAE-Random-start_promptstate') # SAE+VPT start with Random + model_weight_idx = 'ViT_base_Random_PuzzleTuning_SAE_E_199_promptstate' + model_weight_name = 'ViT_b16_224_Random_PuzzleTuning_SAE_CPIAm_Prompt_Deep_tokennum_20_E_199_promptstate.pth' + print_a_PuzzleTuning_comparison_script(model_weight_idx, model_weight_name, lr, lrf, lr_mystr, lrf_mystr, + dataset_name, dataroot, draw_root, Pre_Trained_model_path_PATH, + save_model_PATH, data_augmentation_mode, GPU_idx, Prompt_input=True) + + print('#SAE_fixp16fixr25-timm-start_promptstate') # SAE_fixp16fixr25+VPT start with timm + model_weight_idx = 'ViT_base_timm_PuzzleTuning_SAE_fixp16fixr25_E_199_promptstate' + model_weight_name = 'ViT_b16_224_timm_PuzzleTuning_SAE_fixp16fixr25_CPIAm_Prompt_Deep_tokennum_20_E_199_promptstate.pth' + print_a_PuzzleTuning_comparison_script(model_weight_idx, model_weight_name, lr, lrf, lr_mystr, lrf_mystr, + dataset_name, dataroot, draw_root, Pre_Trained_model_path_PATH, + save_model_PATH, data_augmentation_mode, GPU_idx, Prompt_input=True) + + print('#SAE_fixp16ratiodecay-timm-start_promptstate') # SAE_fixp16ratiodecay+VPT start with timm + model_weight_idx = 'ViT_base_timm_PuzzleTuning_SAE_fixp16ratiodecay_E_199_promptstate' + model_weight_name = 'ViT_b16_224_timm_PuzzleTuning_SAE_fixp16ratiodecay_CPIAm_Prompt_Deep_tokennum_20_E_199_promptstate.pth' + print_a_PuzzleTuning_comparison_script(model_weight_idx, model_weight_name, lr, lrf, lr_mystr, lrf_mystr, + dataset_name, dataroot, draw_root, Pre_Trained_model_path_PATH, + save_model_PATH, data_augmentation_mode, GPU_idx, Prompt_input=True) + + print('') + print('cd /home/pancreatic-cancer-diagnosis-tansformer/code/utils') + record_dir = os.path.join(draw_root, 'CSV_logs') + print('python check_log_json.py --enable_notify --draw_root ' + draw_root + ' --record_dir ' + record_dir) + print('cd /home/pancreatic-cancer-diagnosis-tansformer/code') + + +def write_additional_PuzzleTuning_comparison_script(add_idx, lr_mystr, lrf_mystr, data_augmentation_mode, dataset_name, + model_weight_idx='timm_mae_CPIAm_E100', + model_weight_name='ViT_b16_224_timm_mae_ALL_100.pth', + GPU_idx='0', Prompt_input=False): + """ + In PuzzleTuning comparison experiments we put + datasets at: --dataroot /root/autodl-tmp/datasets + Pre_Trained_model_path /root/autodl-tmp/pre_trained_models # output_models (not applicable for comparison) + Prompt_state_path (not applicable for comparison) /root/autodl-tmp/output_models + save the training model at: model_path /root/autodl-tmp/saved_models + draw_root /root/autodl-tmp/PuzzleTuning_Comparison/[*lr*_*lrf*_*dataset_name*] + + # fixme the additional experiments settings need to manually set!!! 
+ in the additional experiments, we save the runs to + draw_root /root/autodl-tmp/runs/[*lr*_*lrf*_*dataset_name*] + and then copy a duplicates to /root/autodl-tmp/PuzzleTuning_Comparison/[*lr*_*lrf*_*dataset_name*] + + """ + dataroot_PATH = '/root/autodl-tmp/datasets' + Pre_Trained_model_path_PATH = '/root/autodl-tmp/pre_trained_models' + save_model_PATH = '/root/autodl-tmp/saved_models' + draw_root_PATH = '/root/autodl-tmp/runs' + copy_to_draw_root_PATH = '/root/autodl-tmp/PuzzleTuning_Comparison' + + data_augmentation_mode = str(data_augmentation_mode) + GPU_idx = str(GPU_idx) + + lr = str(zero_trans_mystrlr_to_float(lr_mystr)) + lrf = '0.' + str(lrf_mystr) + + experiment_idx = lr_mystr + '_lf' + lrf_mystr + '_' + dataset_name + add_experiment_idx = add_idx + '_' + lr_mystr + '_lf' + lrf_mystr + '_' + dataset_name + + dataroot = os.path.join(dataroot_PATH, dataset_name + '_CLS') + # additional exp runs path + draw_root = os.path.join(draw_root_PATH, add_experiment_idx) + # basic all exp runs path + copy_draw_root = os.path.join(copy_to_draw_root_PATH, experiment_idx) + + print('# Additional ' + add_idx) + print_a_PuzzleTuning_comparison_script(model_weight_idx, model_weight_name, lr, lrf, lr_mystr, lrf_mystr, + dataset_name, dataroot, draw_root, Pre_Trained_model_path_PATH, + save_model_PATH, data_augmentation_mode, GPU_idx, Prompt_input=Prompt_input) + print('') + print('cd /home/pancreatic-cancer-diagnosis-tansformer/code/utils') + # update the total record + print('') + print('cp -r ' + draw_root + '/*' + ' ' + copy_draw_root) + record_dir = os.path.join(copy_draw_root, 'CSV_logs') + print('python check_log_json.py --draw_root ' + copy_draw_root + ' --record_dir ' + record_dir) + + # update the additional runs and send to notify + record_dir = os.path.join(draw_root, add_experiment_idx) + print('python check_log_json.py --enable_notify --draw_root ' + draw_root + ' --record_dir ' + record_dir) + + print('cd /home/pancreatic-cancer-diagnosis-tansformer/code') + + +def write_CLS_script(model_idxs, data_augmentation_mode, edge_size, batch_size, lr, lrf, enable_tensorboard, + test_enable_attention_check, dataset_name, dataroot, model_path, draw_root): + data_augmentation_mode = str(data_augmentation_mode) + edge_size_ipt = str(edge_size) + batch_size = str(batch_size) + lr_name = zero_trans_floatlr_to_mystrlr(lr) + lr = str(lr) + lf_name = str(int(100 * lrf)) + lrf = str(lrf) + dataroot = dataroot + dataset_name + '_CLS' + + for model_idx in model_idxs: + + # alter the edge size for certain models + if model_idx in ['cross_former', 'convit', 'visformer', 'ViT_h']: + edge_size = '224' + else: + edge_size = edge_size_ipt + + if enable_tensorboard is True: + print('python Train.py --model_idx ' + model_idx + '_' + edge_size + '_' + lr_name + + '_PT_lf' + lf_name + '_b' + batch_size + '_' + dataset_name + '_CLS --edge_size ' + edge_size + + ' --data_augmentation_mode ' + data_augmentation_mode + ' --batch_size ' + batch_size + + ' --lr ' + lr + ' --lrf ' + lrf + ' --enable_tensorboard --dataroot ' + dataroot + + ' --model_path ' + model_path + ' --draw_root ' + draw_root) + print('') + else: + print('python Train.py --model_idx ' + model_idx + '_' + edge_size + '_' + lr_name + + '_PT_lf' + lf_name + '_b' + batch_size + '_' + dataset_name + '_CLS --edge_size ' + edge_size + + ' --data_augmentation_mode ' + data_augmentation_mode + ' --batch_size ' + batch_size + + ' --lr ' + lr + ' --lrf ' + lrf + ' --dataroot ' + dataroot + + ' --model_path ' + model_path + ' --draw_root ' + draw_root) + 
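# each emitted command is followed by a blank line so the generated .sh stays readable +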
print('')
+
+    for model_idx in model_idxs:
+
+        # alter the edge size for certain models
+        if model_idx in ['cross_former', 'convit', 'visformer', 'ViT_h']:
+            edge_size = '224'
+        else:
+            edge_size = edge_size_ipt
+
+        if test_enable_attention_check is True:
+            print('python Test.py --model_idx ' + model_idx + '_' + edge_size + '_' + lr_name +
+                  '_PT_lf' + lf_name + '_b' + batch_size + '_' + dataset_name + '_CLS --edge_size ' + edge_size +
+                  ' --data_augmentation_mode ' + data_augmentation_mode + ' --enable_attention_check --dataroot ' +
+                  dataroot + ' --model_path ' + model_path + ' --draw_root ' + draw_root)
+            print('')
+        else:
+            print('python Test.py --model_idx ' + model_idx + '_' + edge_size + '_' + lr_name +
+                  '_PT_lf' + lf_name + '_b' + batch_size + '_' + dataset_name + '_CLS --edge_size ' + edge_size +
+                  ' --data_augmentation_mode ' + data_augmentation_mode + ' --dataroot ' +
+                  dataroot + ' --model_path ' + model_path + ' --draw_root ' + draw_root)
+            print('')
+
+
+def write_CLS_AUG_script(model_idx, augmentation_names, data_augmentation_mode, edge_size, batch_size, lr, lrf,
+                         test_enable_attention_check, enable_tensorboard, dataset_name, dataroot, model_path,
+                         draw_root):
+    data_augmentation_mode = str(data_augmentation_mode)
+    edge_size = str(edge_size)
+    batch_size = str(batch_size)
+    lr_name = zero_trans_floatlr_to_mystrlr(lr)
+    lr = str(lr)
+    lf_name = str(int(100 * lrf))
+    lrf = str(lrf)
+    dataroot = dataroot + dataset_name + '_CLS'
+
+    for augmentation_name in augmentation_names:
+        if enable_tensorboard is True:
+            print('python Train.py --model_idx ' + model_idx + '_' + edge_size + '_' + lr_name +
+                  '_PT_lf' + lf_name + '_b' + batch_size + '_' + dataset_name + '_' + augmentation_name + '_CLS' +
+                  ' --augmentation_name ' + augmentation_name + ' --edge_size ' + edge_size +
+                  ' --data_augmentation_mode ' + data_augmentation_mode + ' --batch_size ' + batch_size +
+                  ' --lr ' + lr + ' --lrf ' + lrf + ' --enable_tensorboard --dataroot ' + dataroot +
+                  ' --model_path ' + model_path + ' --draw_root ' + draw_root)
+            print('')
+        else:
+            print('python Train.py --model_idx ' + model_idx + '_' + edge_size + '_' + lr_name +
+                  '_PT_lf' + lf_name + '_b' + batch_size + '_' + dataset_name + '_' + augmentation_name + '_CLS' +
+                  ' --augmentation_name ' + augmentation_name + ' --edge_size ' + edge_size +
+                  ' --data_augmentation_mode ' + data_augmentation_mode + ' --batch_size ' + batch_size +
+                  ' --lr ' + lr + ' --lrf ' + lrf + ' --dataroot ' + dataroot +
+                  ' --model_path ' + model_path + ' --draw_root ' + draw_root)
+            print('')
+
+    for augmentation_name in augmentation_names:
+        if test_enable_attention_check is True:
+            print('python Test.py --model_idx ' + model_idx + '_' + edge_size + '_' + lr_name +
+                  '_PT_lf' + lf_name + '_b' + batch_size + '_' + dataset_name + '_' + augmentation_name + '_CLS' +
+                  ' --edge_size ' + edge_size + ' --data_augmentation_mode ' + data_augmentation_mode +
+                  ' --enable_attention_check --dataroot ' + dataroot + ' --model_path ' + model_path +
+                  ' --draw_root ' + draw_root)
+            print('')
+        else:
+            print('python Test.py --model_idx ' + model_idx + '_' + edge_size + '_' + lr_name +
+                  '_PT_lf' + lf_name + '_b' + batch_size + '_' + dataset_name + '_' + augmentation_name + '_CLS' +
+                  ' --edge_size ' + edge_size + ' --data_augmentation_mode ' + data_augmentation_mode + ' --dataroot ' +
+                  dataroot + ' --model_path ' + model_path + ' --draw_root ' + draw_root)
+            print('')
+
+
+def write_MIL_script(model_idxs, data_augmentation_mode, edge_size, batch_size, patch_size, lr, lrf,
+                     enable_tensorboard, test_enable_attention_check, dataset_name, dataroot, model_path,
+                     draw_root, imaging_root=None):
+    # imaging_root is the path for saving the imaging checks; it can be the same as draw_root
+    if imaging_root is None:
+        imaging_root = draw_root
+
+    data_augmentation_mode = str(data_augmentation_mode)
+    edge_size = str(edge_size)
+    batch_size = str(batch_size)
+    patch_size = str(patch_size)
+    lr_name = zero_trans_floatlr_to_mystrlr(lr)
+    lr = str(lr)
+    lf_name = str(int(100 * lrf))
+    lrf = str(lrf)
+    # derive the CLS dataroot first, before dataroot is remapped to the MIL folder
+    CLS_dataroot = dataroot + dataset_name + '_CLS'
+    dataroot = dataroot + dataset_name + '_MIL'
+
+    for model_idx in model_idxs:
+        if enable_tensorboard is True:
+            print('python MIL_train.py --model_idx ' + model_idx + '_' + edge_size + '_' + lr_name +
+                  '_PT_lf' + lf_name + '_b' + batch_size + '_p' + patch_size + '_' + dataset_name +
+                  '_MIL --edge_size ' + edge_size + ' --data_augmentation_mode ' + data_augmentation_mode +
+                  ' --batch_size ' + batch_size + ' --patch_size ' + patch_size + ' --lr ' + lr + ' --lrf ' +
+                  lrf + ' --enable_tensorboard --dataroot ' + dataroot + ' --model_path ' + model_path +
+                  ' --draw_root ' + draw_root)
+            print('')
+        else:
+            print('python MIL_train.py --model_idx ' + model_idx + '_' + edge_size + '_' + lr_name +
+                  '_PT_lf' + lf_name + '_b' + batch_size + '_p' + patch_size + '_' + dataset_name +
+                  '_MIL --edge_size ' + edge_size + ' --data_augmentation_mode ' + data_augmentation_mode +
+                  ' --batch_size ' + batch_size + ' --patch_size ' + patch_size + ' --lr ' + lr + ' --lrf ' +
+                  lrf + ' --dataroot ' + dataroot + ' --model_path ' + model_path + ' --draw_root ' + draw_root)
+            print('')
+
+    for model_idx in model_idxs:
+        print('python MIL_test.py --model_idx ' + model_idx + '_' + edge_size + '_' + lr_name +
+              '_PT_lf' + lf_name + '_b' + batch_size + '_p' + patch_size + '_' + dataset_name +
+              '_MIL --edge_size ' + edge_size + ' --patch_size ' + patch_size +
+              ' --batch_size 1 --data_augmentation_mode ' + data_augmentation_mode + ' --dataroot ' +
+              dataroot + ' --model_path ' + model_path + ' --draw_root ' + draw_root)
+        print('')
+
+        if test_enable_attention_check is True:  # set up the checks with several batch-size settings
+            print('python Test.py --model_idx ' + model_idx + '_' + edge_size + '_' + lr_name +
+                  '_PT_lf' + lf_name + '_b' + batch_size + '_p' + patch_size + '_' + dataset_name +
+                  '_MIL --edge_size ' + edge_size + ' --data_augmentation_mode ' + data_augmentation_mode +
+                  ' --MIL_Stripe --enable_attention_check --check_minibatch 10' +
+                  ' --dataroot ' + CLS_dataroot + ' --model_path ' + model_path +
+                  ' --draw_root ' + imaging_root)
+            print('')
+            print('python MIL_test.py --model_idx ' + model_idx + '_' + edge_size + '_' + lr_name +
+                  '_PT_lf' + lf_name + '_b' + batch_size + '_p' + patch_size + '_' + dataset_name +
+                  '_MIL --shuffle_attention_check --MIL_Stripe --edge_size ' + edge_size +
+                  ' --data_augmentation_mode ' + data_augmentation_mode +
+                  ' --shuffle_dataloader --batch_size 4 --check_minibatch 10' + ' --patch_size ' + patch_size +
+                  ' --dataroot ' + dataroot + ' --model_path ' + model_path +
+                  ' --draw_root ' + imaging_root)
+            print('')
+            print('python MIL_test.py --model_idx ' + model_idx + '_' + edge_size + '_' + lr_name +
+                  '_PT_lf' + lf_name + '_b' + batch_size + '_p' + patch_size + '_' + dataset_name +
+                  '_MIL --shuffle_attention_check --MIL_Stripe --edge_size ' + edge_size +
+                  ' --data_augmentation_mode ' + data_augmentation_mode +
+                  ' --batch_size 4 --check_minibatch 10' + ' --patch_size ' + patch_size +
+                  ' --dataroot ' + dataroot + ' --model_path ' + model_path +
+                  ' --draw_root ' + imaging_root)
+            print('')
+            print('python MIL_test.py --model_idx ' + model_idx + '_' + edge_size + '_' + lr_name +
+                  '_PT_lf' + lf_name + '_b' + batch_size + '_p' + patch_size + '_' + dataset_name +
+                  '_MIL --shuffle_attention_check --MIL_Stripe --edge_size ' + edge_size +
+                  ' --data_augmentation_mode ' + data_augmentation_mode +
+                  ' --batch_size 1 --check_minibatch 10' + ' --patch_size ' + patch_size +
+                  ' --dataroot ' + dataroot + ' --model_path ' + model_path +
+                  ' --draw_root ' + imaging_root)
+            print('')
+
+        else:
+            print('python Test.py --model_idx ' + model_idx + '_' + edge_size + '_' + lr_name +
+                  '_PT_lf' + lf_name + '_b' + batch_size + '_p' + patch_size + '_' + dataset_name +
+                  '_MIL --edge_size ' + edge_size + ' --data_augmentation_mode ' + data_augmentation_mode +
+                  ' --MIL_Stripe --dataroot ' + CLS_dataroot + ' --model_path ' + model_path +
+                  ' --draw_root ' + draw_root)
+            print('')
+
+
+'''
+if __name__ == '__main__':
+
+    print('#!/bin/sh')
+    print('')
+    # The first step of CLS-MIL tuning is a quick sweep with one empirical setting, to see
+    # roughly where every model lands, and to compare against the literature at the same time.
+    # First, explore the CLS comparison results
+    model_idxs = ['ViT', 'vgg16', 'vgg19', 'mobilenetv3', 'inceptionv3', 'xception',
+                  'ResNet50', 'efficientnet_b3', 'swin_b', 'ResN50_ViT', 'conformer', 'cross_former']
+
+    batch_size = 8
+    dataset_name = 'NCT-CRC-HE-100K'
+
+    write_CLS_script(model_idxs=model_idxs,
+                     data_augmentation_mode=3,
+                     edge_size=384,
+                     batch_size=batch_size,
+                     lr=0.000007,
+                     lrf=0.35,
+                     enable_tensorboard=True,
+                     test_enable_attention_check=True,
+                     dataset_name=dataset_name,
+                     dataroot='/root/autodl-tmp/datasets/',
+                     model_path='/root/autodl-tmp/saved_models',
+                     draw_root='/root/autodl-tmp/runs')
+
+    # for the formal experiments, the various MIL ablation studies still need to be done afterwards
+    # TODO more write_MIL_script
+    # Next, explore CLS + a specific model (ViT) + different data augmentations
+    augmentation_names = ['Cutout', 'Mixup', 'CutMix']
+    write_CLS_AUG_script(model_idx='ViT',
+                         augmentation_names=augmentation_names,
+                         data_augmentation_mode=3,
+                         edge_size=384,
+                         batch_size=batch_size,
+                         lr=0.000007,
+                         lrf=0.35,
+                         enable_tensorboard=True,
+                         test_enable_attention_check=True,
+                         dataset_name=dataset_name,
+                         dataroot='/root/autodl-tmp/datasets/',
+                         model_path='/root/autodl-tmp/saved_models',
+                         draw_root='/root/autodl-tmp/runs')
+
+    # Finally, explore the MIL + ViT results
+    MIL_model_idxs = ['ViT', ]
+    # MIL ablations
+    write_MIL_script(model_idxs=MIL_model_idxs,
+                     data_augmentation_mode=3,
+                     edge_size=384,
+                     batch_size=batch_size,
+                     patch_size=16,
+                     lr=0.000007,
+                     lrf=0.35,
+                     enable_tensorboard=True,
+                     test_enable_attention_check=False,
+                     dataset_name=dataset_name,
+                     dataroot='/root/autodl-tmp/datasets/',
+                     model_path='/root/autodl-tmp/saved_models',
+                     draw_root='/root/autodl-tmp/runs',
+                     imaging_root='/root/autodl-tmp/imaging_results')
+    write_MIL_script(model_idxs=MIL_model_idxs,
+                     data_augmentation_mode=3,
+                     edge_size=384,
+                     batch_size=batch_size,
+                     patch_size=64,
+                     lr=0.000007,
+                     lrf=0.35,
+                     enable_tensorboard=True,
+                     test_enable_attention_check=False,
+                     dataset_name=dataset_name,
+                     dataroot='/root/autodl-tmp/datasets/',
+                     model_path='/root/autodl-tmp/saved_models',
+                     draw_root='/root/autodl-tmp/runs',
+                     imaging_root='/root/autodl-tmp/imaging_results')
+    write_MIL_script(model_idxs=MIL_model_idxs,
+                     data_augmentation_mode=3,
+                     edge_size=384,
+                     batch_size=batch_size,
+                     patch_size=48,
+                     lr=0.000007,
+                     lrf=0.35,
+                     enable_tensorboard=True,
+                     test_enable_attention_check=False,
+                     dataset_name=dataset_name,
+                     dataroot='/root/autodl-tmp/datasets/',
+                     model_path='/root/autodl-tmp/saved_models',
+                     draw_root='/root/autodl-tmp/runs',
+                     imaging_root='/root/autodl-tmp/imaging_results')
+    write_MIL_script(model_idxs=MIL_model_idxs,
+                     data_augmentation_mode=3,
+                     edge_size=384,
+                     batch_size=batch_size,
+                     patch_size=96,
+                     lr=0.000007,
+                     lrf=0.35,
+                     enable_tensorboard=True,
+                     test_enable_attention_check=False,
+                     dataset_name=dataset_name,
+                     dataroot='/root/autodl-tmp/datasets/',
+                     model_path='/root/autodl-tmp/saved_models',
+                     draw_root='/root/autodl-tmp/runs',
+                     imaging_root='/root/autodl-tmp/imaging_results')
+    write_MIL_script(model_idxs=MIL_model_idxs,
+                     data_augmentation_mode=3,
+                     edge_size=384,
+                     batch_size=batch_size,
+                     patch_size=128,
+                     lr=0.000007,
+                     lrf=0.35,
+                     enable_tensorboard=True,
+                     test_enable_attention_check=False,
+                     dataset_name=dataset_name,
+                     dataroot='/root/autodl-tmp/datasets/',
+                     model_path='/root/autodl-tmp/saved_models',
+                     draw_root='/root/autodl-tmp/runs',
+                     imaging_root='/root/autodl-tmp/imaging_results')
+
+    # when tuning, first tune MIL to its best, then run the CLS experiments with those parameters to check the results
+
+    print('cd /home/pancreatic-cancer-diagnosis-tansformer/code/utils')
+    print('')
+    print(
+        'python check_log_json.py --enable_notify --draw_root /root/autodl-tmp/runs --record_dir /root/autodl-tmp/CSV_logs')
+    print('')
+    print('shutdown')
+'''
+
+
+def get_args_parser():
+    parser = argparse.ArgumentParser(description='Automatically write shell script for training')
+
+    # Model Name or index
+    parser.add_argument('--lr_mystr', default=None, type=str, help='Model lr EG: 506 -> 0.000006')
+    parser.add_argument('--lrf_mystr', default=None, type=str, help='Model lrf EG: 50 -> cosine decay to 50%%')
+    parser.add_argument('--data_augmentation_mode', default=None, type=str, help='ROSE,pRCC:0; CAM16,WBC:3')
+    parser.add_argument('--dataset_name', default=None, type=str, help='ROSE,pRCC,CAM16,WBC ?')
+    parser.add_argument('--GPU_idx', default='0', type=str, help='Experiment GPU_idx EG: 0')
+
+    return parser
+
+
+if __name__ == '__main__':
+    parser = get_args_parser()
+    args = parser.parse_args()
+
+    print('#!/bin/sh')
+    print('')
+    # add DropPos-CPIA
+    write_additional_PuzzleTuning_comparison_script(add_idx='DropPos-CPIA', lr_mystr=args.lr_mystr,
+                                                    lrf_mystr=args.lrf_mystr,
+                                                    data_augmentation_mode=args.data_augmentation_mode,
+                                                    dataset_name=args.dataset_name,
+                                                    model_weight_idx='timm_DropPos_CPIAm_E200',
+                                                    model_weight_name='ViT_b16_224_timm_DropPos_ALL_200.pth',
+                                                    GPU_idx=args.GPU_idx, Prompt_input=False)
+
+    '''
+    # add MAE-CPIA
+    write_additional_PuzzleTuning_comparison_script(add_idx='MAE-CPIA', lr_mystr=args.lr_mystr,
+                                                    lrf_mystr=args.lrf_mystr,
+                                                    data_augmentation_mode=args.data_augmentation_mode,
+                                                    dataset_name=args.dataset_name,
+                                                    model_weight_idx='timm_mae_CPIAm_E100',
+                                                    model_weight_name='ViT_b16_224_timm_mae_ALL_100.pth',
+                                                    GPU_idx=args.GPU_idx, Prompt_input=False)
+    # add SDMAE-CPIA
+    write_additional_PuzzleTuning_comparison_script(add_idx='SDMAE-CPIA', lr_mystr=args.lr_mystr,
+                                                    lrf_mystr=args.lrf_mystr,
+                                                    data_augmentation_mode=args.data_augmentation_mode,
+                                                    dataset_name=args.dataset_name,
+                                                    model_weight_idx='timm_SDMAE_CPIAm_E80',
+                                                    model_weight_name='ViT_b16_224_timm_SDMAE_ALL_80.pth',
+                                                    GPU_idx=args.GPU_idx, Prompt_input=False)
+    # add GCMAE-CPIA
+    write_additional_PuzzleTuning_comparison_script(add_idx='GCMAE-CPIA', lr_mystr=args.lr_mystr,
+                                                    lrf_mystr=args.lrf_mystr,
+                                                    data_augmentation_mode=args.data_augmentation_mode,
+                                                    dataset_name=args.dataset_name,
+                                                    model_weight_idx='timm_GCMAE_CPIAm_E80',
+                                                    model_weight_name='ViT_b16_224_timm_GCMAE_ALL_80.pth',
+                                                    GPU_idx=args.GPU_idx, Prompt_input=False)
+    # add JIGSAW-CPIA
+
+    write_additional_PuzzleTuning_comparison_script(add_idx='JIGSAW-CPIA', lr_mystr=args.lr_mystr,
+                                                    lrf_mystr=args.lrf_mystr,
+                                                    data_augmentation_mode=args.data_augmentation_mode,
+                                                    dataset_name=args.dataset_name,
+                                                    model_weight_idx='timm_JIGSAW_CPIAm_E50',
+                                                    model_weight_name='ViT_b16_224_timm_JIGSAW_ALL_50.pth',
+                                                    GPU_idx=args.GPU_idx, Prompt_input=False)
+
+    # add DropPos-CPIA
+    write_additional_PuzzleTuning_comparison_script(add_idx='DropPos-CPIA', lr_mystr=args.lr_mystr,
+                                                    lrf_mystr=args.lrf_mystr,
+                                                    data_augmentation_mode=args.data_augmentation_mode,
+                                                    dataset_name=args.dataset_name,
+                                                    model_weight_idx='timm_DropPos_CPIAm_E200',
+                                                    model_weight_name='ViT_b16_224_timm_DropPos_ALL_200.pth',
+                                                    GPU_idx=args.GPU_idx, Prompt_input=False)
+
+    # add MAE+VPT
+    write_additional_PuzzleTuning_comparison_script(add_idx='MAE-VPT_promptstate',
+                                                    lr_mystr=args.lr_mystr,
+                                                    lrf_mystr=args.lrf_mystr,
+                                                    data_augmentation_mode=args.data_augmentation_mode,
+                                                    dataset_name=args.dataset_name,
+                                                    model_weight_idx='timm_mae_Prompt_CPIAm_E199_promptstate',
+                                                    model_weight_name='ViT_b16_224_timm_PuzzleTuning_MAE_CPIAm_Prompt_Deep_tokennum_20_E_199_promptstate.pth',
+                                                    GPU_idx='0', Prompt_input=True)
+    # add SAE-MAE-start
+    write_additional_PuzzleTuning_comparison_script(add_idx='SAE-MAE-start_promptstate',
+                                                    lr_mystr=args.lr_mystr,
+                                                    lrf_mystr=args.lrf_mystr,
+                                                    data_augmentation_mode=args.data_augmentation_mode,
+                                                    dataset_name=args.dataset_name,
+                                                    model_weight_idx='ViT_base_MAEImageNet_PuzzleTuning_SAE_E_199_promptstate',
+                                                    model_weight_name='ViT_b16_224_MAEImageNet_PuzzleTuning_SAE_CPIAm_Prompt_Deep_tokennum_20_E_199_promptstate.pth',
+                                                    GPU_idx='0', Prompt_input=True)
+    # add SAE-Random-start
+    write_additional_PuzzleTuning_comparison_script(add_idx='SAE-Random-start_promptstate',
+                                                    lr_mystr=args.lr_mystr,
+                                                    lrf_mystr=args.lrf_mystr,
+                                                    data_augmentation_mode=args.data_augmentation_mode,
+                                                    dataset_name=args.dataset_name,
+                                                    model_weight_idx='ViT_base_Random_PuzzleTuning_SAE_E_199_promptstate',
+                                                    model_weight_name='ViT_b16_224_Random_PuzzleTuning_SAE_CPIAm_Prompt_Deep_tokennum_20_E_199_promptstate.pth',
+                                                    GPU_idx='0', Prompt_input=True)
+
+    # add PuzzleTuning_SAE_ViT_to_VPT-CPIA
+    write_additional_PuzzleTuning_comparison_script(add_idx='PuzzleTuning_SAE_ViT-CPIA', lr_mystr=args.lr_mystr,
+                                                    lrf_mystr=args.lrf_mystr,
+                                                    data_augmentation_mode=args.data_augmentation_mode,
+                                                    dataset_name=args.dataset_name,
+                                                    model_weight_idx='timm_PuzzleTuning_SAE_E_199',
+                                                    model_weight_name='ViT_b16_224_timm_PuzzleTuning_SAE_CPIAm_E_199.pth',
+                                                    GPU_idx=args.GPU_idx, Prompt_input=False)
+    '''
+
+    # rewrite all
+    '''
+    write_PuzzleTuning_comparison_script(lr_mystr=args.lr_mystr, lrf_mystr=args.lrf_mystr,
+                                         data_augmentation_mode=args.data_augmentation_mode,
+                                         dataset_name=args.dataset_name, GPU_idx=args.GPU_idx)
+    '''
+
+
+    '''
+    we can use the following code to generate the additional exp scripts
+
+    # read and auto generate task info
+    import os
+    path='/root/autodl-tmp/PuzzleTuning_Comparison'
+    data_augmentation_dic = {'ROSE': '0', 'pRCC': '0', 'CAM16': '3', 'WBC': '3'}
+    for exp_root in os.listdir(path):
+        out_sh_name = exp_root + '.sh'
+        lr_mystr = exp_root.split('_')[0]
+        lrf_mystr = exp_root.split('_')[1].split('lf')[-1]
+        dataset_name = exp_root.split('_')[-1]
+        data_augmentation_mode = data_augmentation_dic[dataset_name]
+        print('nohup python Experiment_script_helper.py --lr_mystr ' + lr_mystr + ' --lrf_mystr ' + lrf_mystr +
+              ' --data_augmentation_mode ' + data_augmentation_mode + ' --dataset_name ' + dataset_name + ' > ' +
+              out_sh_name + ' 2>&1 &')
+
+    # then, we use the shell to run the generated lines
+
+    # the generated sh files have a nohup line as their first line, so we can use this to erase it
+    remove_nohup_ignoring_input_at_first_line(directory='./')
+
+    # we can use the func to combine the sh files:
+    concatenate_the_lines_from_several_files(directory='./', cat_file='0.sh')
+    '''
diff --git a/PuzzleTuning/utils/Make_experiments_sh_with_helper.py b/PuzzleTuning/utils/Make_experiments_sh_with_helper.py
new file mode 100644
index 0000000000000000000000000000000000000000..5430280561b4229d6bf5a14b448863304bd30a20
--- /dev/null
+++ b/PuzzleTuning/utils/Make_experiments_sh_with_helper.py
@@ -0,0 +1,181 @@
+import os
+
+def explore_the_experiments_and_generate_nohup_lines(path='/root/autodl-tmp/PuzzleTuning_Comparison'):
+    data_augmentation_dic = {'ROSE': '0', 'pRCC': '0', 'CAM16': '3', 'WBC': '3'}
+    for exp_root in os.listdir(path):
+        out_sh_name = exp_root + '.sh'
+        lr_mystr = exp_root.split('_')[0]
+        lrf_mystr = exp_root.split('_')[1].split('lf')[-1]
+        dataset_name = exp_root.split('_')[-1]
+        data_augmentation_mode = data_augmentation_dic[dataset_name]
+        print('nohup python Experiment_script_helper.py --lr_mystr ' + lr_mystr + ' --lrf_mystr ' + lrf_mystr +
+              ' --data_augmentation_mode ' + data_augmentation_mode + ' --dataset_name ' + dataset_name + ' > ' +
+              out_sh_name + ' 2>&1 &')
+
+
+explore_the_experiments_and_generate_nohup_lines('/Users/zhangtianyi/Downloads/PuzzleTuning_Comparison')
+'''
+nohup python Experiment_script_helper.py --lr_mystr 408 --lrf_mystr 25 --data_augmentation_mode 0 --dataset_name ROSE > 408_lf25_ROSE.sh 2>&1 &
+nohup python Experiment_script_helper.py --lr_mystr 607 --lrf_mystr 05 --data_augmentation_mode 3 --dataset_name WBC > 607_lf05_WBC.sh 2>&1 &
+nohup python Experiment_script_helper.py --lr_mystr 503 --lrf_mystr 40 --data_augmentation_mode 0 --dataset_name ROSE > 503_lf40_ROSE.sh 2>&1 &
+nohup python Experiment_script_helper.py --lr_mystr 605 --lrf_mystr 50 --data_augmentation_mode 0 --dataset_name ROSE > 605_lf50_ROSE.sh 2>&1 &
+nohup python Experiment_script_helper.py --lr_mystr 606 --lrf_mystr 05 --data_augmentation_mode 0 --dataset_name ROSE > 606_lf05_ROSE.sh 2>&1 &
+nohup python Experiment_script_helper.py --lr_mystr 503 --lrf_mystr 05 --data_augmentation_mode 3 --dataset_name CAM16 > 503_lf05_CAM16.sh 2>&1 &
+nohup python Experiment_script_helper.py --lr_mystr 503 --lrf_mystr 40 --data_augmentation_mode 3 --dataset_name CAM16 > 503_lf40_CAM16.sh 2>&1 &
+nohup python Experiment_script_helper.py --lr_mystr 504 --lrf_mystr 25 --data_augmentation_mode 0 --dataset_name pRCC > 504_lf25_pRCC.sh 2>&1 &
+nohup python Experiment_script_helper.py --lr_mystr 509 --lrf_mystr 05 --data_augmentation_mode 0 --dataset_name pRCC > 509_lf05_pRCC.sh 2>&1 &
+nohup python Experiment_script_helper.py --lr_mystr 507 --lrf_mystr 50 --data_augmentation_mode 3 --dataset_name CAM16 > 507_lf50_CAM16.sh 2>&1 &
+nohup python Experiment_script_helper.py --lr_mystr 607 --lrf_mystr 10 --data_augmentation_mode 3 --dataset_name WBC > 607_lf10_WBC.sh 2>&1 &
+nohup python Experiment_script_helper.py --lr_mystr 401 --lrf_mystr 35 --data_augmentation_mode 0 --dataset_name pRCC > 401_lf35_pRCC.sh 2>&1 &
+nohup python Experiment_script_helper.py --lr_mystr 402 --lrf_mystr 50 --data_augmentation_mode 3 --dataset_name CAM16 > 402_lf50_CAM16.sh 2>&1 &
+nohup python Experiment_script_helper.py --lr_mystr 305 --lrf_mystr 05 --data_augmentation_mode 0 --dataset_name pRCC > 305_lf05_pRCC.sh 2>&1 &
+nohup python Experiment_script_helper.py --lr_mystr 509 --lrf_mystr 25 --data_augmentation_mode 0 --dataset_name pRCC > 509_lf25_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 502 --lrf_mystr 50 --data_augmentation_mode 0 --dataset_name ROSE > 502_lf50_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 504 --lrf_mystr 05 --data_augmentation_mode 0 --dataset_name pRCC > 504_lf05_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 507 --lrf_mystr 50 --data_augmentation_mode 0 --dataset_name pRCC > 507_lf50_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 504 --lrf_mystr 30 --data_augmentation_mode 0 --dataset_name ROSE > 504_lf30_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 304 --lrf_mystr 35 --data_augmentation_mode 0 --dataset_name pRCC > 304_lf35_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 307 --lrf_mystr 20 --data_augmentation_mode 0 --dataset_name pRCC > 307_lf20_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 408 --lrf_mystr 40 --data_augmentation_mode 3 --dataset_name WBC > 408_lf40_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 605 --lrf_mystr 50 --data_augmentation_mode 3 --dataset_name WBC > 605_lf50_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 503 --lrf_mystr 15 --data_augmentation_mode 0 --dataset_name pRCC > 503_lf15_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 303 --lrf_mystr 10 --data_augmentation_mode 0 --dataset_name ROSE > 303_lf10_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 302 --lrf_mystr 15 --data_augmentation_mode 0 --dataset_name pRCC > 302_lf15_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 506 --lrf_mystr 20 --data_augmentation_mode 0 --dataset_name pRCC > 506_lf20_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 608 --lrf_mystr 25 --data_augmentation_mode 0 --dataset_name pRCC > 608_lf25_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 306 --lrf_mystr 10 --data_augmentation_mode 0 --dataset_name pRCC > 306_lf10_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 501 --lrf_mystr 50 --data_augmentation_mode 0 --dataset_name pRCC > 501_lf50_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 506 --lrf_mystr 35 --data_augmentation_mode 0 --dataset_name ROSE > 506_lf35_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 506 --lrf_mystr 40 --data_augmentation_mode 3 --dataset_name CAM16 > 506_lf40_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 503 --lrf_mystr 10 --data_augmentation_mode 3 --dataset_name WBC > 503_lf10_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 404 --lrf_mystr 25 --data_augmentation_mode 3 --dataset_name WBC > 404_lf25_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 609 --lrf_mystr 35 --data_augmentation_mode 3 --dataset_name WBC > 609_lf35_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 609 --lrf_mystr 20 --data_augmentation_mode 3 --dataset_name WBC > 609_lf20_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 404 --lrf_mystr 30 --data_augmentation_mode 3 --dataset_name WBC > 404_lf30_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 606 --lrf_mystr 15 --data_augmentation_mode 3 --dataset_name WBC > 606_lf15_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 607 --lrf_mystr 15 
--data_augmentation_mode 3 --dataset_name WBC > 607_lf15_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 507 --lrf_mystr 30 --data_augmentation_mode 0 --dataset_name pRCC > 507_lf30_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 303 --lrf_mystr 05 --data_augmentation_mode 0 --dataset_name pRCC > 303_lf05_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 508 --lrf_mystr 10 --data_augmentation_mode 3 --dataset_name CAM16 > 508_lf10_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 409 --lrf_mystr 25 --data_augmentation_mode 3 --dataset_name WBC > 409_lf25_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 501 --lrf_mystr 15 --data_augmentation_mode 0 --dataset_name ROSE > 501_lf15_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 306 --lrf_mystr 20 --data_augmentation_mode 3 --dataset_name CAM16 > 306_lf20_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 305 --lrf_mystr 15 --data_augmentation_mode 0 --dataset_name pRCC > 305_lf15_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 604 --lrf_mystr 35 --data_augmentation_mode 3 --dataset_name WBC > 604_lf35_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 407 --lrf_mystr 10 --data_augmentation_mode 3 --dataset_name WBC > 407_lf10_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 505 --lrf_mystr 50 --data_augmentation_mode 3 --dataset_name CAM16 > 505_lf50_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 409 --lrf_mystr 25 --data_augmentation_mode 0 --dataset_name ROSE > 409_lf25_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 407 --lrf_mystr 05 --data_augmentation_mode 3 --dataset_name WBC > 407_lf05_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 406 --lrf_mystr 05 --data_augmentation_mode 3 --dataset_name WBC > 406_lf05_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 507 --lrf_mystr 40 --data_augmentation_mode 0 --dataset_name pRCC > 507_lf40_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 307 --lrf_mystr 50 --data_augmentation_mode 0 --dataset_name pRCC > 307_lf50_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 301 --lrf_mystr 25 --data_augmentation_mode 3 --dataset_name CAM16 > 301_lf25_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 603 --lrf_mystr 50 --data_augmentation_mode 3 --dataset_name CAM16 > 603_lf50_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 503 --lrf_mystr 50 --data_augmentation_mode 0 --dataset_name ROSE > 503_lf50_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 508 --lrf_mystr 10 --data_augmentation_mode 0 --dataset_name ROSE > 508_lf10_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 508 --lrf_mystr 50 --data_augmentation_mode 3 --dataset_name CAM16 > 508_lf50_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 304 --lrf_mystr 05 --data_augmentation_mode 3 --dataset_name CAM16 > 304_lf05_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 304 --lrf_mystr 40 --data_augmentation_mode 3 --dataset_name CAM16 > 304_lf40_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 606 --lrf_mystr 15 --data_augmentation_mode 0 --dataset_name ROSE > 606_lf15_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 608 --lrf_mystr 50 --data_augmentation_mode 3 --dataset_name WBC > 608_lf50_WBC.sh 2>&1 & 
+nohup python Experiment_script_helper.py --lr_mystr 609 --lrf_mystr 50 --data_augmentation_mode 3 --dataset_name WBC > 609_lf50_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 509 --lrf_mystr 25 --data_augmentation_mode 3 --dataset_name CAM16 > 509_lf25_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 607 --lrf_mystr 40 --data_augmentation_mode 3 --dataset_name CAM16 > 607_lf40_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 503 --lrf_mystr 20 --data_augmentation_mode 3 --dataset_name CAM16 > 503_lf20_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 505 --lrf_mystr 15 --data_augmentation_mode 3 --dataset_name WBC > 505_lf15_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 508 --lrf_mystr 35 --data_augmentation_mode 3 --dataset_name CAM16 > 508_lf35_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 608 --lrf_mystr 15 --data_augmentation_mode 0 --dataset_name pRCC > 608_lf15_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 506 --lrf_mystr 25 --data_augmentation_mode 0 --dataset_name ROSE > 506_lf25_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 401 --lrf_mystr 05 --data_augmentation_mode 3 --dataset_name CAM16 > 401_lf05_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 501 --lrf_mystr 40 --data_augmentation_mode 0 --dataset_name pRCC > 501_lf40_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 608 --lrf_mystr 40 --data_augmentation_mode 3 --dataset_name WBC > 608_lf40_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 506 --lrf_mystr 20 --data_augmentation_mode 3 --dataset_name CAM16 > 506_lf20_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 503 --lrf_mystr 25 --data_augmentation_mode 0 --dataset_name pRCC > 503_lf25_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 609 --lrf_mystr 10 --data_augmentation_mode 0 --dataset_name ROSE > 609_lf10_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 509 --lrf_mystr 05 --data_augmentation_mode 3 --dataset_name CAM16 > 509_lf05_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 305 --lrf_mystr 15 --data_augmentation_mode 3 --dataset_name CAM16 > 305_lf15_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 505 --lrf_mystr 50 --data_augmentation_mode 0 --dataset_name ROSE > 505_lf50_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 608 --lrf_mystr 35 --data_augmentation_mode 0 --dataset_name pRCC > 608_lf35_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 402 --lrf_mystr 10 --data_augmentation_mode 3 --dataset_name CAM16 > 402_lf10_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 408 --lrf_mystr 10 --data_augmentation_mode 3 --dataset_name WBC > 408_lf10_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 306 --lrf_mystr 15 --data_augmentation_mode 3 --dataset_name CAM16 > 306_lf15_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 507 --lrf_mystr 15 --data_augmentation_mode 0 --dataset_name pRCC > 507_lf15_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 508 --lrf_mystr 50 --data_augmentation_mode 0 --dataset_name pRCC > 508_lf50_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 503 --lrf_mystr 10 --data_augmentation_mode 0 --dataset_name pRCC > 503_lf10_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 302 
--lrf_mystr 25 --data_augmentation_mode 0 --dataset_name ROSE > 302_lf25_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 502 --lrf_mystr 15 --data_augmentation_mode 0 --dataset_name ROSE > 502_lf15_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 608 --lrf_mystr 20 --data_augmentation_mode 0 --dataset_name pRCC > 608_lf20_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 509 --lrf_mystr 40 --data_augmentation_mode 0 --dataset_name pRCC > 509_lf40_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 504 --lrf_mystr 25 --data_augmentation_mode 3 --dataset_name WBC > 504_lf25_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 301 --lrf_mystr 50 --data_augmentation_mode 0 --dataset_name ROSE > 301_lf50_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 302 --lrf_mystr 05 --data_augmentation_mode 0 --dataset_name ROSE > 302_lf05_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 508 --lrf_mystr 20 --data_augmentation_mode 3 --dataset_name CAM16 > 508_lf20_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 506 --lrf_mystr 05 --data_augmentation_mode 0 --dataset_name pRCC > 506_lf05_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 301 --lrf_mystr 30 --data_augmentation_mode 3 --dataset_name WBC > 301_lf30_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 507 --lrf_mystr 05 --data_augmentation_mode 3 --dataset_name CAM16 > 507_lf05_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 307 --lrf_mystr 05 --data_augmentation_mode 0 --dataset_name pRCC > 307_lf05_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 503 --lrf_mystr 50 --data_augmentation_mode 3 --dataset_name CAM16 > 503_lf50_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 605 --lrf_mystr 15 --data_augmentation_mode 0 --dataset_name ROSE > 605_lf15_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 503 --lrf_mystr 05 --data_augmentation_mode 0 --dataset_name ROSE > 503_lf05_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 301 --lrf_mystr 35 --data_augmentation_mode 3 --dataset_name WBC > 301_lf35_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 606 --lrf_mystr 20 --data_augmentation_mode 3 --dataset_name CAM16 > 606_lf20_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 304 --lrf_mystr 10 --data_augmentation_mode 3 --dataset_name CAM16 > 304_lf10_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 401 --lrf_mystr 30 --data_augmentation_mode 3 --dataset_name CAM16 > 401_lf30_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 304 --lrf_mystr 10 --data_augmentation_mode 0 --dataset_name pRCC > 304_lf10_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 504 --lrf_mystr 20 --data_augmentation_mode 0 --dataset_name pRCC > 504_lf20_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 504 --lrf_mystr 15 --data_augmentation_mode 0 --dataset_name ROSE > 504_lf15_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 506 --lrf_mystr 50 --data_augmentation_mode 3 --dataset_name CAM16 > 506_lf50_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 505 --lrf_mystr 25 --data_augmentation_mode 0 --dataset_name ROSE > 505_lf25_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 501 --lrf_mystr 20 --data_augmentation_mode 0 --dataset_name ROSE > 
501_lf20_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 301 --lrf_mystr 20 --data_augmentation_mode 3 --dataset_name WBC > 301_lf20_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 502 --lrf_mystr 50 --data_augmentation_mode 3 --dataset_name WBC > 502_lf50_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 305 --lrf_mystr 20 --data_augmentation_mode 3 --dataset_name CAM16 > 305_lf20_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 401 --lrf_mystr 30 --data_augmentation_mode 0 --dataset_name pRCC > 401_lf30_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 409 --lrf_mystr 30 --data_augmentation_mode 0 --dataset_name ROSE > 409_lf30_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 303 --lrf_mystr 50 --data_augmentation_mode 3 --dataset_name CAM16 > 303_lf50_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 505 --lrf_mystr 05 --data_augmentation_mode 0 --dataset_name ROSE > 505_lf05_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 607 --lrf_mystr 10 --data_augmentation_mode 0 --dataset_name ROSE > 607_lf10_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 309 --lrf_mystr 10 --data_augmentation_mode 0 --dataset_name pRCC > 309_lf10_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 302 --lrf_mystr 50 --data_augmentation_mode 0 --dataset_name pRCC > 302_lf50_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 306 --lrf_mystr 35 --data_augmentation_mode 3 --dataset_name CAM16 > 306_lf35_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 304 --lrf_mystr 05 --data_augmentation_mode 0 --dataset_name ROSE > 304_lf05_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 605 --lrf_mystr 10 --data_augmentation_mode 3 --dataset_name WBC > 605_lf10_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 606 --lrf_mystr 30 --data_augmentation_mode 3 --dataset_name WBC > 606_lf30_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 502 --lrf_mystr 20 --data_augmentation_mode 3 --dataset_name WBC > 502_lf20_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 506 --lrf_mystr 15 --data_augmentation_mode 0 --dataset_name pRCC > 506_lf15_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 509 --lrf_mystr 50 --data_augmentation_mode 0 --dataset_name pRCC > 509_lf50_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 609 --lrf_mystr 05 --data_augmentation_mode 3 --dataset_name WBC > 609_lf05_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 303 --lrf_mystr 10 --data_augmentation_mode 0 --dataset_name pRCC > 303_lf10_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 609 --lrf_mystr 10 --data_augmentation_mode 3 --dataset_name WBC > 609_lf10_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 304 --lrf_mystr 15 --data_augmentation_mode 3 --dataset_name CAM16 > 304_lf15_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 503 --lrf_mystr 15 --data_augmentation_mode 0 --dataset_name ROSE > 503_lf15_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 507 --lrf_mystr 20 --data_augmentation_mode 3 --dataset_name CAM16 > 507_lf20_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 606 --lrf_mystr 25 --data_augmentation_mode 3 --dataset_name WBC > 606_lf25_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 402 
--lrf_mystr 20 --data_augmentation_mode 3 --dataset_name CAM16 > 402_lf20_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 503 --lrf_mystr 35 --data_augmentation_mode 0 --dataset_name ROSE > 503_lf35_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 303 --lrf_mystr 05 --data_augmentation_mode 0 --dataset_name ROSE > 303_lf05_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 608 --lrf_mystr 30 --data_augmentation_mode 0 --dataset_name pRCC > 608_lf30_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 302 --lrf_mystr 35 --data_augmentation_mode 0 --dataset_name ROSE > 302_lf35_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 306 --lrf_mystr 30 --data_augmentation_mode 3 --dataset_name CAM16 > 306_lf30_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 505 --lrf_mystr 05 --data_augmentation_mode 3 --dataset_name CAM16 > 505_lf05_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 302 --lrf_mystr 40 --data_augmentation_mode 0 --dataset_name pRCC > 302_lf40_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 302 --lrf_mystr 05 --data_augmentation_mode 3 --dataset_name WBC > 302_lf05_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 409 --lrf_mystr 20 --data_augmentation_mode 0 --dataset_name ROSE > 409_lf20_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 302 --lrf_mystr 10 --data_augmentation_mode 3 --dataset_name WBC > 302_lf10_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 306 --lrf_mystr 10 --data_augmentation_mode 3 --dataset_name CAM16 > 306_lf10_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 504 --lrf_mystr 10 --data_augmentation_mode 0 --dataset_name pRCC > 504_lf10_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 304 --lrf_mystr 35 --data_augmentation_mode 0 --dataset_name ROSE > 304_lf35_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 505 --lrf_mystr 40 --data_augmentation_mode 3 --dataset_name WBC > 505_lf40_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 504 --lrf_mystr 40 --data_augmentation_mode 3 --dataset_name WBC > 504_lf40_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 503 --lrf_mystr 25 --data_augmentation_mode 3 --dataset_name WBC > 503_lf25_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 606 --lrf_mystr 10 --data_augmentation_mode 0 --dataset_name ROSE > 606_lf10_ROSE.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 305 --lrf_mystr 40 --data_augmentation_mode 3 --dataset_name CAM16 > 305_lf40_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 305 --lrf_mystr 05 --data_augmentation_mode 3 --dataset_name CAM16 > 305_lf05_CAM16.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 609 --lrf_mystr 15 --data_augmentation_mode 3 --dataset_name WBC > 609_lf15_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 606 --lrf_mystr 20 --data_augmentation_mode 3 --dataset_name WBC > 606_lf20_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 502 --lrf_mystr 30 --data_augmentation_mode 3 --dataset_name WBC > 502_lf30_WBC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 509 --lrf_mystr 10 --data_augmentation_mode 0 --dataset_name pRCC > 509_lf10_pRCC.sh 2>&1 & +nohup python Experiment_script_helper.py --lr_mystr 404 --lrf_mystr 35 --data_augmentation_mode 3 --dataset_name CAM16 > 
404_lf35_CAM16.sh 2>&1 &
+nohup python Experiment_script_helper.py --lr_mystr 401 --lrf_mystr 15 --data_augmentation_mode 3 --dataset_name CAM16 > 401_lf15_CAM16.sh 2>&1 &
+nohup python Experiment_script_helper.py --lr_mystr 505 --lrf_mystr 35 --data_augmentation_mode 0 --dataset_name ROSE > 505_lf35_ROSE.sh 2>&1 &
+'''
diff --git a/PuzzleTuning/utils/SoftCrossEntropyLoss.py b/PuzzleTuning/utils/SoftCrossEntropyLoss.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff72edeab929320600269630e27b6e6e5da54cac
--- /dev/null
+++ b/PuzzleTuning/utils/SoftCrossEntropyLoss.py
@@ -0,0 +1,33 @@
+"""
+SoftCrossEntropy loss Script ver: May 17th 19:00
+
+update
+SoftlabelCrossEntropy loss for soft-label based augmentations
+fixme reduction='sum' is reportedly problematic? check the warning
+"""
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch import Tensor
+
+
+# define SoftlabelCrossEntropy loss for soft-label based augmentations
+def SoftCrossEntropy(input, target, reduction='sum'):  # reduction='sum' fixme reportedly problematic? check the warning
+    log_likelihood = -F.log_softmax(input, dim=1)
+    batch = input.shape[0]
+    if reduction == 'average':
+        loss = torch.sum(torch.mul(log_likelihood, target)) / batch
+    else:
+        loss = torch.sum(torch.mul(log_likelihood, target))
+    return loss
+
+
+class SoftlabelCrossEntropy(nn.modules.loss._Loss):
+    __constants__ = ['reduction']
+
+    def __init__(self, reduction: str = 'sum') -> None:
+        super(SoftlabelCrossEntropy, self).__init__(reduction=reduction)
+
+    def forward(self, input: Tensor, target: Tensor) -> Tensor:
+        return SoftCrossEntropy(input, target, reduction=self.reduction)
diff --git a/PuzzleTuning/utils/check_log_json.py b/PuzzleTuning/utils/check_log_json.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7dd1f30eca7ea43357d1ae56338089e01f01ae7
--- /dev/null
+++ b/PuzzleTuning/utils/check_log_json.py
@@ -0,0 +1,223 @@
+"""
+Organize log and output excel script ver: Sep 13th 15:00
+enable_notify
+"""
+
+
+import argparse
+import json
+import os
+
+try:  # adapt to different run environments
+    from utils.metrics import *
+except:
+    from metrics import *
+
+
+def find_all_files(root, suffix=None):
+    '''
+    Return a list of the paths of all files with the given suffix
+    '''
+    res = []
+    for root, _, files in os.walk(root):
+        for f in files:
+            if suffix is not None and not f.endswith(suffix):
+                continue
+            res.append(os.path.join(root, f))
+    return res
+
+
+def read_a_json_log(json_path, record_dir):
+    if not os.path.exists(record_dir):
+        os.makedirs(record_dir)
+
+    with open(json_path) as f:
+        load_dict = json.load(f)
+        # print(load_dict)
+        epoch_num = len(load_dict)
+        try:
+            cls_list = [cls for cls in load_dict[str(1)]['train']]
+            test_status = False
+        except:
+            cls_list = [cls for cls in load_dict['test']['test']]
+            test_status = True
+        else:
+            pass
+        cls_num = len(cls_list)
+
+        indicator_list = ['Precision', 'Recall', 'Sensitivity', 'Specificity', 'NPV', 'F1_score']
+        indicator_num = len(indicator_list)
+
+        blank_num = cls_num * indicator_num
+        first_blank_num = blank_num // 2
+
+        empty_str1 = ' ,'  # align with the Acc column
+        for i in range(0, first_blank_num):
+            empty_str1 += ' ,'
+
+        empty_str2 = ''
+        for i in range(0, blank_num):
+            empty_str2 += ' ,'
+
+    result_csv_name = os.path.split(json_path)[1].split('.')[0] + '.csv'
+    result_indicators = [os.path.split(json_path)[1].split('.')[0], ]  # first slot is reserved for the model name
+
+    with open(os.path.join(record_dir, result_csv_name), 'w') as f_log:
+        if test_status:
+            # write header row 1
+            f_log.write('Phase:,' + empty_str1 + ' Test\n')
+            head = 'Epoch:, '
+            class_head = 'Acc, '  # target layout: 'Acc, ' + classes * indicator_list
+            for cls in cls_list:
+                for indicator in indicator_list:
+                    class_head += cls + '_' + indicator + ', '
+
+            # write header row 2
+            f_log.write(head + class_head + '\n')  # Test
+            f_log.close()
+
+        else:
+            # write header row 1
+            f_log.write('Phase:,' + empty_str1 + ' Train' + empty_str2 + ' Val\n')
+
+            head = 'Epoch:, '
+            class_head = 'Acc, '  # target layout: 'Acc, ' + classes * indicator_list
+            for cls in cls_list:
+                for indicator in indicator_list:
+                    class_head += cls + '_' + indicator + ', '
+
+            # write header row 2
+            f_log.write(head + class_head + class_head + '\n')  # Train val
+            f_log.close()
+
+    # initialize the best record
+    best_val_acc = 0.0
+
+    for epoch in range(1, epoch_num + 1):
+        if test_status:
+            epoch = 'test'
+        epoch_indicators = [epoch, ]  # first slot is reserved for the epoch
+
+        for phase in ['train', 'val']:
+            if test_status:
+                phase = 'test'
+
+            sum_tp = 0.0
+
+            phase_indicators = [0.0, ]  # first slot is reserved for Acc
+
+            for cls in cls_list:
+                log = load_dict[str(epoch)][phase][cls]
+                tp = log['tp']
+                tn = log['tn']
+                fp = log['fp']
+                fn = log['fn']
+
+                sum_tp += tp
+
+                Precision = compute_precision(tp, fp)
+                Recall = compute_recall(tp, fn)
+
+                Sensitivity = compute_sensitivity(tp, fn)
+                Specificity = compute_specificity(tn, fp)
+
+                NPV = compute_NPV(tn, fn)
+                F1_score = compute_f1_score(tp, tn, fp, fn)
+
+                cls_indicators = [Precision, Recall, Sensitivity, Specificity, NPV, F1_score]
+                phase_indicators.extend(cls_indicators)
+
+            Acc = 100 * (sum_tp / float(tp + tn + fn + fp))  # the last class's tp + tn + fn + fp already equals the total sample count
+            phase_indicators[0] = Acc
+
+            epoch_indicators.extend(phase_indicators)
+
+            if Acc >= best_val_acc and phase == 'val':
+                best_val_acc = Acc
+                best_epoch_indicators = epoch_indicators
+
+            elif test_status:
+                with open(os.path.join(record_dir, result_csv_name), 'a') as f_log:
+                    for i in epoch_indicators:
+                        f_log.write(str(i) + ', ')
+                    f_log.write('\n')
+                    f_log.close()
+                result_indicators.extend(epoch_indicators)
+                return result_indicators  # done: return the test log row
+            else:
+                pass
+
+        # epoch_indicators
+        with open(os.path.join(record_dir, result_csv_name), 'a') as f_log:
+            for i in epoch_indicators:
+                f_log.write(str(i) + ', ')
+            f_log.write('\n')
+
+    with open(os.path.join(record_dir, result_csv_name), 'a') as f_log:
+        f_log.write('\n')
+        f_log.write('\n')
+        # write header row 1
+        f_log.write('Phase:,' + empty_str1 + ' Train' + empty_str2 + ' Val\n')
+        # write header row 2
+        f_log.write('Best Epoch:, ' + class_head + class_head + '\n')  # Train val
+
+        try:
+            for i in best_epoch_indicators:
+                f_log.write(str(i) + ', ')
+            f_log.close()
+            result_indicators.extend(best_epoch_indicators)
+            return result_indicators  # done: return the best-epoch row
+        except:
+            print('No best_epoch_indicators')
+            return result_indicators  # done
+
+
+def read_all_logs(logs_path, record_dir):
+    if not os.path.exists(record_dir):
+        os.makedirs(record_dir)
+
+    res = find_all_files(logs_path, suffix='.json')
+
+    result_csv_name = os.path.split(logs_path)[1] + '.csv'
+
+    with open(os.path.join(record_dir, result_csv_name), 'w') as f_log:
+        for json_path in res:
+            result_indicators = read_a_json_log(json_path, record_dir)  # best_epoch_indicators of a model json log
+
+            for i in result_indicators:
+                f_log.write(str(i) + ', ')
+            f_log.write('\n')
+        f_log.close()
+
+    print('record_dir:', record_dir)
+
+
+def main(args):
+    ONE_LOG = args.ONE_LOG
+    draw_root = args.draw_root
+    record_dir = args.record_dir
+
+    if ONE_LOG:
+        read_a_json_log(draw_root, record_dir)
+    else:
+        read_all_logs(draw_root, record_dir)
+
+
+def get_args_parser():
+    parser = argparse.ArgumentParser(description='Log checker')
+
+    parser.add_argument('--ONE_LOG', action='store_true', help='check only one LOG')
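+    # usage sketch (paths follow the examples elsewhere in this repo; adjust to your setup):
+    #   python check_log_json.py --draw_root /root/autodl-tmp/runs --record_dir /root/autodl-tmp/CSV_logs
+    # with --ONE_LOG, --draw_root should instead point at a single .json log file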
+    parser.add_argument('--draw_root', default=r'../../../../Downloads/runs',
+                        help='path of the drawn and saved tensorboard output')
+
+    parser.add_argument('--record_dir', default=r'../../../../Downloads/runs/CSV_logs',
+                        help='path to save csv log output')
+
+    return parser
+
+
+if __name__ == '__main__':
+    parser = get_args_parser()
+    args = parser.parse_args()
+    main(args)
diff --git a/PuzzleTuning/utils/check_tensorboard.py b/PuzzleTuning/utils/check_tensorboard.py
new file mode 100644
index 0000000000000000000000000000000000000000..0290e3d958248ec4dbbdcc598ef27aa9a30160ad
--- /dev/null
+++ b/PuzzleTuning/utils/check_tensorboard.py
@@ -0,0 +1,56 @@
+# read tf events and plot ACC-Loss
+from tensorboard.backend.event_processing import event_accumulator  # import tensorboard's event parser
+
+import os
+import matplotlib
+import matplotlib.pyplot as plt
+
+
+def find_all_files_startwith(root, prefix=None):
+    """
+    Return a list of the paths of all files with the given prefix
+    """
+    res = []
+    for root, _, files in os.walk(root):
+        for f in files:
+            if prefix is not None and not f.startswith(prefix):
+                continue
+            res.append(os.path.join(root, f))
+    return res
+
+
+def ACC_loss(PATH, out_file_path):
+    fig = plt.figure(figsize=(6, 4))
+    ax1 = fig.add_subplot(111)
+
+    runs_all = find_all_files_startwith(PATH, prefix='events')
+    print(runs_all)
+
+    for runs_path in runs_all:
+        model_idx = os.path.split(os.path.split(runs_path)[0])[1]
+
+        ea = event_accumulator.EventAccumulator(runs_path)  # initialize the EventAccumulator object
+        ea.Reload()  # this step is required: it actually loads the event contents
+        # print(ea.scalars.Keys())  # check which scalar records were saved
+
+        train_ACC = ea.scalars.Items("train_ACC")
+        train_loss = ea.scalars.Items("train_loss")  # read train_loss
+        '''
+        print([(i.step, i.value) for i in train_ACC])
+        for i, j in zip(train_ACC, train_loss):
+            print((i.value, j.value))
+        '''
+        ax1.plot([i.value for i in train_loss], [i.value for i in train_ACC], label=model_idx)
+
+    plt.legend(loc='lower right')
+    ax1.set_xlabel("Loss")
+    ax1.set_ylabel("Acc")
+    # save before show: show() may clear the current figure, which would leave a blank file
+    plt.savefig(out_file_path, dpi=1000)
+    plt.show()
+
+
+if __name__ == '__main__':
+    matplotlib.use('Agg')
+    PATH = './MIL-SI/Archive/log/abalation'
+    out_file_path = './patch_size_abalation_loss-acc.jpg'
+    ACC_loss(PATH, out_file_path)
diff --git a/PuzzleTuning/utils/data_augmentation.py b/PuzzleTuning/utils/data_augmentation.py
new file mode 100644
index 0000000000000000000000000000000000000000..8632a767e823529412426d7578ecaf3bee41d747
--- /dev/null
+++ b/PuzzleTuning/utils/data_augmentation.py
@@ -0,0 +1,84 @@
+"""
+data_augmentation Script ver: Sep 1st 20:30
+
+dataset structure: ImageNet
+image folder dataset is used.
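+
+a minimal usage sketch (the dataset path is illustrative):
+    from torchvision import datasets
+    data_transforms = data_augmentation(data_augmentation_mode=3, edge_size=384)
+    train_set = datasets.ImageFolder('datasets/train', data_transforms['train'])
+    val_set = datasets.ImageFolder('datasets/val', data_transforms['val'])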
+"""
+
+from torchvision import transforms
+
+
+def data_augmentation(data_augmentation_mode=0, edge_size=384):
+    if data_augmentation_mode == 0:  # ROSE + MARS
+        data_transforms = {
+            'train': transforms.Compose([
+                transforms.RandomRotation((0, 180)),
+                transforms.RandomHorizontalFlip(),
+                transforms.RandomVerticalFlip(),
+                transforms.CenterCrop(700),  # center area for classification
+                transforms.Resize([edge_size, edge_size]),
+                transforms.ColorJitter(brightness=0.15, contrast=0.3, saturation=0.3, hue=0.06),
+                # HSL shift operation
+                transforms.ToTensor()
+            ]),
+            'val': transforms.Compose([
+                transforms.CenterCrop(700),
+                transforms.Resize([edge_size, edge_size]),
+                transforms.ToTensor()
+            ]),
+        }
+
+    elif data_augmentation_mode == 1:  # Cervical
+        data_transforms = {
+            'train': transforms.Compose([
+                transforms.Resize([edge_size, edge_size]),
+                transforms.RandomVerticalFlip(),
+                transforms.RandomHorizontalFlip(),
+                transforms.ColorJitter(brightness=0.15, contrast=0.3, saturation=0.3, hue=0.06),
+                # HSL shift operation
+                transforms.ToTensor()
+            ]),
+            'val': transforms.Compose([
+                transforms.Resize([edge_size, edge_size]),
+                transforms.ToTensor()
+            ]),
+        }
+
+    elif data_augmentation_mode == 2:  # warwick
+        data_transforms = {
+            'train': transforms.Compose([
+                transforms.RandomRotation((0, 180)),
+                transforms.RandomHorizontalFlip(),
+                transforms.RandomVerticalFlip(),
+                transforms.CenterCrop(360),  # center area for classification
+                transforms.Resize([edge_size, edge_size]),
+                transforms.ColorJitter(brightness=0.15, contrast=0.3, saturation=0.3, hue=0.06),
+                # HSL shift operation
+                transforms.ToTensor()
+            ]),
+            'val': transforms.Compose([
+                transforms.CenterCrop(360),
+                transforms.Resize([edge_size, edge_size]),
+                transforms.ToTensor()
+            ]),
+        }
+
+    elif data_augmentation_mode == 3:  # for the square input: just resize
+        data_transforms = {
+            'train': transforms.Compose([
+                transforms.RandomHorizontalFlip(),
+                transforms.RandomVerticalFlip(),
+                transforms.Resize([edge_size, edge_size]),
+                transforms.ColorJitter(brightness=0.15, contrast=0.3, saturation=0.3, hue=0.06),
+                # HSL shift operation
+                transforms.ToTensor()
+            ]),
+            'val': transforms.Compose([
+                transforms.Resize([edge_size, edge_size]),
+                transforms.ToTensor()
+            ]),
+        }
+    else:
+        print('no legal data augmentation is selected')
+        return -1
+    return data_transforms
diff --git a/PuzzleTuning/utils/dual_augmentation.py b/PuzzleTuning/utils/dual_augmentation.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b802f604a39357ee7af29af934a35b37dfaf111
--- /dev/null
+++ b/PuzzleTuning/utils/dual_augmentation.py
@@ -0,0 +1,242 @@
+"""
+dual augmentation on both images and their masks Script ver: Apr 10th 11:20
+
+
+"""
+import random
+import numpy as np
+import cv2
+from PIL import Image
+from torchvision import transforms
+from utils.tools import to_2tuple
+
+
+class DualCompose:  # fit pytorch transform
+    def __init__(self, transforms):
+        self.transforms = transforms
+
+    def __call__(self, image, mask=None):
+        # process the cv2 transformation first
+        for t in self.transforms:
+            image, mask = t(image, mask)
+        # NOTICE: after converting back to a PIL image, the value range becomes 1/255 of the cv2 numpy one
+
+        # Then, Transform cv2 BGR image to PIL RGB image
+
+        # BGR -> RGB channel
+        b, g, r = cv2.split(image)
+        image = cv2.merge([r, g, b])
+        b, g, r = cv2.split(mask)
+        mask = cv2.merge([r, g, b])
+        # Image.fromarray maps the 0-255 values to the PIL 0-1 range
+        return Image.fromarray(np.uint8(image)), Image.fromarray(np.uint8(mask))
+
+
+class DualImageTransform:
+    # Transform cv2 BGR image to PIL RGB image
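+    # (functionally this matches cv2.cvtColor(image, cv2.COLOR_BGR2RGB); the explicit
+    # split/merge below mirrors DualCompose above)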
+    def __init__(self):
+        pass
+
+    def __call__(self, image, mask=None):
+        # BGR -> RGB channel
+        b, g, r = cv2.split(image)
+        image = cv2.merge([r, g, b])
+        b, g, r = cv2.split(mask)
+        mask = cv2.merge([r, g, b])
+        # Image.fromarray maps the 0-255 values to the PIL 0-1 range
+        return Image.fromarray(np.uint8(image)), Image.fromarray(np.uint8(mask))
+
+
+class Dual_RandomHorizontalFlip:
+    """
+    Random horizontal flip.
+    image shape: (height, width, channels)
+    mask shape: (height, width)
+    possibility: probability of applying the flip
+    """
+
+    def __init__(self, possibility=0.5):
+        assert isinstance(possibility, (int, float))
+        self.possibility = possibility
+
+    def __call__(self, image, mask):
+        if random.random() <= self.possibility:
+            image = np.flip(image, axis=1)
+            mask = np.flip(mask, axis=1)
+
+        return image, mask
+
+
+class Dual_RandomVerticalFlip:
+    """
+    Random vertical flip.
+    image shape: (height, width, channels)
+    mask shape: (height, width)
+    possibility: probability of applying the flip
+    """
+
+    def __init__(self, possibility=0.5):
+        assert isinstance(possibility, (int, float))
+        self.possibility = possibility
+
+    def __call__(self, image, mask):
+        if random.random() <= self.possibility:
+            image = np.flip(image, axis=0)
+            mask = np.flip(mask, axis=0)
+
+        return image, mask
+
+
+class Dual_Rotate:
+    """
+    Random rotation.
+    image shape: (height, width, channels)
+    mask shape: (height, width)
+    possibility: probability of applying the rotation
+    range: range of rotation angles
+    """
+
+    def __init__(self, possibility=0.5, range=20):
+        self.possibility = possibility
+        self.range = range
+
+    def __call__(self, image, mask):
+        # cv2 reads dimensions in reverse order, so this is (height, width) rather than (width, height);
+        # getting this wrong has severe consequences for non-square inputs
+        height, width = image.shape[:2]
+
+        if random.random() <= self.possibility:
+            angle = np.random.randint(0, self.range)
+
+            center = (width // 2, height // 2)
+            # build the rotation matrix: first arg is the rotation center, second the rotation angle,
+            # third the scaling factor applied to the original image before rotation
+            M = cv2.getRotationMatrix2D(center, -angle, 1)
+            # apply the affine transform: first arg is the image, second the rotation matrix,
+            # third the output image size after the transform
+            image = cv2.warpAffine(image, M, (width, height))
+            mask = cv2.warpAffine(mask.astype(np.uint8), M, (width, height))
+
+        return image.astype(np.uint8), mask.astype(np.int64)
+
+
+def Four_step_dual_augmentation(data_augmentation_mode=0, edge_size=384):
+    """
+    Get data augmentation methods
+
+    Dual_transform : Transform CV2 images and their mask by Rotate, RandomHorizontalFlip, etc.
+    DualImage : Transform CV2 images and their mask to PIL images
+    train_domain_transform : transforms.ColorJitter on PIL images
+    transform: PIL crop, resize and to Tensor
+
+    USAGE:
+
+    IN Train:
+        image, mask = self.Dual_transform(image, mask)
+        # image color jitter shifting
+        image = self.train_domain_transform(image)
+        # crop + resize
+        image = self.transform(image)
+
+    IN Val & Test:
+
+        # 0/255 mask -> binary mask
+        image, mask = self.DualImage(image, mask)
+        # crop + resize
+        image = self.transform(image)
+    """
+
+    edge_size = to_2tuple(edge_size)
+
+    if data_augmentation_mode == 0:  # ROSE + MARS
+        # apply the on-the-fly synchronized transform on image and mask together
+        Dual_transform = DualCompose([
+            Dual_Rotate(possibility=0.8, range=180),
+            Dual_RandomHorizontalFlip(),
+            Dual_RandomVerticalFlip(),
+        ])
+        # val & test use DualImage to convert to PIL Image
+        DualImage = DualImageTransform()
+
+        # ColorJitter for image only
+        train_domain_transform = transforms.Compose([
+            # HSL shift operation
+            transforms.ColorJitter(brightness=0.15, contrast=0.3, saturation=0.3, hue=0.06),
+        ])
+
+        # lastly, the shared crop / resize / to-tensor transform
+        transform = transforms.Compose([
+            transforms.CenterCrop(700),  # center area for classification
+            transforms.Resize(edge_size),
+            transforms.ToTensor(),  # hwc -> chw tensor
+        ])
+
+    elif data_augmentation_mode == 1:  # Cervical
+        # apply the on-the-fly synchronized transform on image and mask together
+        Dual_transform = DualCompose([
+            Dual_Rotate(possibility=0.8, range=180),
+            Dual_RandomHorizontalFlip(),
+            Dual_RandomVerticalFlip(),
+        ])
+        # val & test use DualImage to convert to PIL Image
+        DualImage = DualImageTransform()
+
+        # ColorJitter for image only
+        train_domain_transform = transforms.Compose([
+            # HSL shift operation
+            transforms.ColorJitter(brightness=0.15, contrast=0.3, saturation=0.3, hue=0.06),
+        ])
+
+        # lastly, the shared resize / to-tensor transform
+        transform = transforms.Compose([
+            transforms.Resize(edge_size),
+            transforms.ToTensor(),  # hwc -> chw tensor
+        ])
+
+    elif data_augmentation_mode == 2:  # Warwick
+        # apply the on-the-fly synchronized transform on image and mask together
+        Dual_transform = DualCompose([
+            Dual_Rotate(possibility=0.8, range=180),
+            Dual_RandomHorizontalFlip(),
+            Dual_RandomVerticalFlip(),
+        ])
+        # val & test use DualImage to convert to PIL Image
+        DualImage = DualImageTransform()
+
+        # ColorJitter for image only
+        train_domain_transform = transforms.Compose([
+            # HSL shift operation
+            transforms.ColorJitter(brightness=0.15, contrast=0.3, saturation=0.3, hue=0.06),
+        ])
+
+        # lastly, the shared crop / resize / to-tensor transform
+        transform = transforms.Compose([
+            transforms.CenterCrop(360),  # center area for classification
+            transforms.Resize(edge_size),
+            transforms.ToTensor(),  # hwc -> chw tensor
+        ])
+
+    elif data_augmentation_mode == 3:  # for the square input: just resize
+        # apply the on-the-fly synchronized transform on image and mask together
+        Dual_transform = DualCompose([
+            # Dual_Rotate(possibility=0.8, range=180),
+            Dual_RandomHorizontalFlip(),
+            Dual_RandomVerticalFlip(),
+        ])
+        # val & test use DualImage to convert to PIL Image
+        DualImage = DualImageTransform()
+
+        # ColorJitter for image only
+        train_domain_transform = transforms.Compose([
+            # HSL shift operation
+            transforms.ColorJitter(brightness=0.15, contrast=0.3, saturation=0.3, hue=0.06),
+        ])
+
+        # lastly, the shared resize / to-tensor transform
+        transform = transforms.Compose([
+            transforms.Resize(edge_size),
+            transforms.ToTensor(),  # hwc -> chw tensor
+        ])
+
+    else:
+        print('no legal data augmentation is selected')
+        return -1
+
+    return Dual_transform, DualImage, train_domain_transform, transform
diff --git a/PuzzleTuning/utils/fmix.py b/PuzzleTuning/utils/fmix.py
new file mode 100644
index 0000000000000000000000000000000000000000..e006c02e8e0c785e7904ea244d0d8c7102c86105
--- /dev/null
+++ b/PuzzleTuning/utils/fmix.py
@@ -0,0 +1,194 @@
+"""
+from official release of ...
+Script ver: July 9th 15:20
+
+"""
+
+import math
+import random
+
+import numpy as np
+from scipy.stats import beta
+
+
+def fftfreqnd(h, w=None, z=None):
+    """ Get bin values for discrete fourier transform of size (h, w, z)
+
+    :param h: Required, first dimension size
+    :param w: Optional, second dimension size
+    :param z: Optional, third dimension size
+    """
+    fz = fx = 0
+    fy = np.fft.fftfreq(h)
+
+    if w is not None:
+        fy = np.expand_dims(fy, -1)
+
+        if w % 2 == 1:
+            fx = np.fft.fftfreq(w)[: w // 2 + 2]
+        else:
+            fx = np.fft.fftfreq(w)[: w // 2 + 1]
+
+    if z is not None:
+        fy = np.expand_dims(fy, -1)
+        if z % 2 == 1:
+            fz = np.fft.fftfreq(z)[:, None]
+        else:
+            fz = np.fft.fftfreq(z)[:, None]
+
+    return np.sqrt(fx * fx + fy * fy + fz * fz)
+
+
+def get_spectrum(freqs, decay_power, ch, h, w=0, z=0):
+    """ Samples a fourier image with given size and frequencies decayed by decay power
+
+    :param freqs: Bin values for the discrete fourier transform
+    :param decay_power: Decay power for frequency decay prop 1/f**d
+    :param ch: Number of channels for the resulting mask
+    :param h: Required, first dimension size
+    :param w: Optional, second dimension size
+    :param z: Optional, third dimension size
+    """
+    scale = np.ones(1) / (np.maximum(freqs, np.array([1. / max(w, h, z)])) ** decay_power)
+
+    param_size = [ch] + list(freqs.shape) + [2]
+    param = np.random.randn(*param_size)
+
+    scale = np.expand_dims(scale, -1)[None, :]
+
+    return scale * param
+
+
+def make_low_freq_image(decay, shape, ch=1):
+    """ Sample a low frequency image from fourier space
+
+    :param decay: Decay power for frequency decay prop 1/f**d
+    :param shape: Shape of desired mask, list up to 3 dims
+    :param ch: Number of channels for desired mask
+    """
+    freqs = fftfreqnd(*shape)
+    spectrum = get_spectrum(freqs, decay, ch, *shape)#.reshape((1, *shape[:-1], -1))
+    spectrum = spectrum[:, 0] + 1j * spectrum[:, 1]
+    mask = np.real(np.fft.irfftn(spectrum, shape))
+
+    if len(shape) == 1:
+        mask = mask[:1, :shape[0]]
+    if len(shape) == 2:
+        mask = mask[:1, :shape[0], :shape[1]]
+    if len(shape) == 3:
+        mask = mask[:1, :shape[0], :shape[1], :shape[2]]
+
+    mask = mask
+    mask = (mask - mask.min())
+    mask = mask / mask.max()
+    return mask
+
+
+def sample_lam(alpha, reformulate=False):
+    """ Sample a lambda from symmetric beta distribution with given alpha
+
+    :param alpha: Alpha value for beta distribution
+    :param reformulate: If True, uses the reformulation of [1].
+    """
+    if reformulate:
+        lam = beta.rvs(alpha+1, alpha)
+    else:
+        lam = beta.rvs(alpha, alpha)
+
+    return lam
+
+
+def binarise_mask(mask, lam, in_shape, max_soft=0.0):
+    """ Binarises a given low frequency image such that it has mean lambda.
+
+    :param mask: Low frequency image, usually the result of `make_low_freq_image`
+    :param lam: Mean value of final mask
+    :param in_shape: Shape of inputs
+    :param max_soft: Softening value between 0 and 0.5 which smooths hard edges in the mask.
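+    A note on the mechanics (summarising the code below): pixels are ranked by mask value,
+    roughly lam * mask.size of the top-ranked ones end up at 1 and the rest at 0, with a
+    linear ramp of width 2 * max_soft * mask.size across the threshold to soften the edge.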
+ :return: + """ + idx = mask.reshape(-1).argsort()[::-1] + mask = mask.reshape(-1) + num = math.ceil(lam * mask.size) if random.random() > 0.5 else math.floor(lam * mask.size) + + eff_soft = max_soft + if max_soft > lam or max_soft > (1-lam): + eff_soft = min(lam, 1-lam) + + soft = int(mask.size * eff_soft) + num_low = num - soft + num_high = num + soft + + mask[idx[:num_high]] = 1 + mask[idx[num_low:]] = 0 + mask[idx[num_low:num_high]] = np.linspace(1, 0, (num_high - num_low)) + + mask = mask.reshape((1, *in_shape)) + return mask + + +def sample_mask(alpha, decay_power, shape, max_soft=0.0, reformulate=False): + """ Samples a mean lambda from beta distribution parametrised by alpha, creates a low frequency image and binarises + it based on this lambda + + :param alpha: Alpha value for beta distribution from which to sample mean of mask + :param decay_power: Decay power for frequency decay prop 1/f**d + :param shape: Shape of desired mask, list up to 3 dims + :param max_soft: Softening value between 0 and 0.5 which smooths hard edges in the mask. + :param reformulate: If True, uses the reformulation of [1]. + """ + if isinstance(shape, int): + shape = (shape,) + + # Choose lambda + lam = sample_lam(alpha, reformulate) + + # Make mask, get mean / std + mask = make_low_freq_image(decay_power, shape) + mask = binarise_mask(mask, lam, shape, max_soft) + + return lam, mask + + +def sample_and_apply(x, alpha, decay_power, shape, max_soft=0.0, reformulate=False): + """ + + :param x: Image batch on which to apply fmix of shape [b, c, shape*] + :param alpha: Alpha value for beta distribution from which to sample mean of mask + :param decay_power: Decay power for frequency decay prop 1/f**d + :param shape: Shape of desired mask, list up to 3 dims + :param max_soft: Softening value between 0 and 0.5 which smooths hard edges in the mask. + :param reformulate: If True, uses the reformulation of [1]. + :return: mixed input, permutation indices, lambda value of mix, + """ + lam, mask = sample_mask(alpha, decay_power, shape, max_soft, reformulate) + index = np.random.permutation(x.shape[0]) + + x1, x2 = x * mask, x[index] * (1-mask) + return x1+x2, index, lam + + +class FMixBase: + r""" FMix augmentation + + Args: + decay_power (float): Decay power for frequency decay prop 1/f**d + alpha (float): Alpha value for beta distribution from which to sample mean of mask + size ([int] | [int, int] | [int, int, int]): Shape of desired mask, list up to 3 dims + max_soft (float): Softening value between 0 and 0.5 which smooths hard edges in the mask. + reformulate (bool): If True, uses the reformulation of [1]. 
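+
+    A minimal sampling sketch using the helpers above (sizes and values are illustrative;
+    x is a [b, c, 32, 32] batch):
+
+        lam, mask = sample_mask(alpha=1, decay_power=3, shape=(32, 32))
+        mixed, index, lam = sample_and_apply(x, alpha=1, decay_power=3, shape=(32, 32))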
+    """
+
+    def __init__(self, decay_power=3, alpha=1, size=(32, 32), max_soft=0.0, reformulate=False):
+        super().__init__()
+        self.decay_power = decay_power
+        self.reformulate = reformulate
+        self.size = size
+        self.alpha = alpha
+        self.max_soft = max_soft
+        self.index = None
+        self.lam = None
+
+    def __call__(self, inputs, labels, alpha=2, beta=2, act=True):
+        raise NotImplementedError
+
diff --git a/PuzzleTuning/utils/metrics.py b/PuzzleTuning/utils/metrics.py
new file mode 100644
index 0000000000000000000000000000000000000000..7e2a892e9a0df8dd4f500e8f05fe3a111a1d7ecd
--- /dev/null
+++ b/PuzzleTuning/utils/metrics.py
@@ -0,0 +1,68 @@
+import numpy as np
+
+
+def compute_accuracy(tp, tn, fn, fp):  # only fit 2 cls condition
+    """
+    Accuracy = (TP + TN) / (FP + FN + TP + TN)
+    """
+    if tp + tn + fn + fp == 0:
+        return 0.0
+    return ((tp + tn) * 100) / float(tp + tn + fn + fp)
+
+
+def compute_specificity(tn, fp):
+    """
+    Specificity = TN / (TN + FP)
+    """
+    if tn + fp == 0:
+        return 0.0
+    return (tn * 100) / float(tn + fp)
+
+
+def compute_sensitivity(tp, fn):  # equal to recall
+    """
+    Sensitivity = TP / (TP + FN)
+    """
+    if tp + fn == 0:
+        return 0.0
+    return (tp * 100) / float(tp + fn)
+
+
+def compute_precision(tp, fp):  # equal to Positive Predictive Value(PPV)
+    """
+    Precision = TP / (TP + FP)
+    """
+    if tp + fp == 0:
+        return 0.0
+    return (tp * 100) / float(tp + fp)
+
+
+def compute_recall(tp, fn):  # equal to Sensitivity
+    """
+    Recall = TP / (TP + FN)
+    """
+    if tp + fn == 0:
+        return 0.0
+    return (tp * 100) / float(tp + fn)
+
+
+def compute_f1_score(tp, tn, fp, fn):
+    # calculates the F1 score
+    precision = compute_precision(tp, fp) / 100
+    recall = compute_recall(tp, fn) / 100
+
+    if precision + recall == 0:
+        return 0.0
+
+    f1_score = (2 * precision * recall) / (precision + recall)
+    return f1_score * 100
+
+
+def compute_NPV(tn, fn):  # Negative Predictive Value
+    """
+    Negative Predictive Value = TN / (TN + FN)
+    """
+    if tn + fn == 0:
+        return 0.0
+    return (tn * 100) / float(tn + fn)
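+
+
+# a quick sanity check of the helpers above (illustrative counts for a 2-class run):
+if __name__ == '__main__':
+    print(compute_precision(40, 5))          # 40 / (40 + 5) * 100 = 88.88...
+    print(compute_recall(40, 10))            # 40 / (40 + 10) * 100 = 80.0
+    print(compute_f1_score(40, 45, 5, 10))   # harmonic mean of the two, ~84.2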
diff --git a/PuzzleTuning/utils/online_augmentations.py b/PuzzleTuning/utils/online_augmentations.py
new file mode 100644
index 0000000000000000000000000000000000000000..960e6756bd1e2711a3b63b72d3643f0198570630
--- /dev/null
+++ b/PuzzleTuning/utils/online_augmentations.py
@@ -0,0 +1,635 @@
+"""
+Online Augmentations   May 23rd 2023 21:30
+ref:
+CutOut, Mixup, CutMix based on
+https://blog.csdn.net/cp1314971/article/details/106612060
+"""
+import cv2
+import torch
+import numpy as np
+import torch.nn.functional as F
+from scipy.special import perm
+from torchvision.transforms import Resize
+from torchvision.transforms import ToPILImage, ToTensor
+
+from utils.visual_usage import patchify, unpatchify
+from utils.fmix import sample_mask, FMixBase  # FMix
+
+
+# generate a random bounding box
+def rand_bbox(size, lam):
+    W = size[2]
+    H = size[3]
+    cut_rat = np.sqrt(1. - lam)
+    cut_w = np.int64(W * cut_rat)
+    cut_h = np.int64(H * cut_rat)
+
+    # uniform
+    cx = np.random.randint(W)
+    cy = np.random.randint(H)
+
+    bbx1 = np.clip(cx - cut_w // 2, 0, W)
+    bby1 = np.clip(cy - cut_h // 2, 0, H)
+    bbx2 = np.clip(cx + cut_w // 2, 0, W)
+    bby2 = np.clip(cy + cut_h // 2, 0, H)
+
+    return bbx1, bby1, bbx2, bby2
+
+
+def saliency_bbox(img, lam):
+    size = img.size()
+    W = size[1]
+    H = size[2]
+    cut_rat = np.sqrt(1. - lam)
+    cut_w = int(W * cut_rat)  # plain int: np.int was removed in NumPy >= 1.24
+    cut_h = int(H * cut_rat)
+
+    # initialize OpenCV's static fine-grained saliency detector and
+    # compute the saliency map
+    temp_img = img.cpu().numpy().transpose(1, 2, 0)
+    saliency = cv2.saliency.StaticSaliencyFineGrained_create()
+    (success, saliencyMap) = saliency.computeSaliency(temp_img)
+    saliencyMap = (saliencyMap * 255).astype("uint8")
+
+    maximum_indices = np.unravel_index(np.argmax(saliencyMap, axis=None), saliencyMap.shape)
+    x = maximum_indices[0]
+    y = maximum_indices[1]
+
+    bbx1 = np.clip(x - cut_w // 2, 0, W)
+    bby1 = np.clip(y - cut_h // 2, 0, H)
+    bbx2 = np.clip(x + cut_w // 2, 0, W)
+    bby2 = np.clip(y + cut_h // 2, 0, H)
+
+    return bbx1, bby1, bbx2, bby2
+
+
+# augmentation methods
+class Cutout(object):
+    def __init__(self, alpha=2, shuffle_p=1.0, class_num=2, batch_size=4, device='cpu'):
+        """
+        Cutout augmentation  arXiv:1708.04552
+        :param alpha: alpha
+        :param shuffle_p: chance of triggering the augmentation
+        :param class_num: number of classification categories
+        :param batch_size: batch_size of training
+        :param device: CUDA or CPU
+        """
+        self.alpha = alpha
+        self.class_num = class_num
+        self.batch_size = batch_size
+        self.p = shuffle_p
+        self.device = torch.device(device)
+
+    def __call__(self, inputs, labels, act=True):
+        labels = torch.eye(self.class_num).to(self.device)[labels, :]  # one-hot hard label
+        ori_inputs = inputs.clone().to(self.device)  # duplicate inputs for ori inputs
+        cutout_inputs = inputs.clone().to(self.device)  # duplicate inputs for outputs
+        lam_list = []  # a list to record the operating ratio
+
+        for i in range(self.batch_size):
+
+            if np.random.randint(0, 101) > 100 * self.p or (not act):
+                # skip the augmentation operation for this sample
+                lam_list.append(-1)
+                continue
+
+            lam = np.random.beta(self.alpha, self.alpha)
+            bbx1, bby1, bbx2, bby2 = rand_bbox(ori_inputs.size(), lam)  # get random bbox
+
+            cutout_inputs[i, :, bbx1:bbx2, bby1:bby2] = 0
+
+            # update the ratio of (area of ori_image on the new masked image) for the soft label
+            lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (ori_inputs.size()[2] * ori_inputs.size()[3]))
+            lam_list.append(lam)
+
+        long_label = labels.argmax(dim=1)
+
+        # NOTICE Cutout uses the long label with plain cross-entropy instead of a soft label with soft-label cross-entropy
+        return cutout_inputs, long_label, long_label
+
+
+class CutMix(object):
+    def __init__(self, alpha=2, shuffle_p=1.0, class_num=2, batch_size=4, device='cpu'):
+        """
+        CutMix augmentation  arXiv:1905.04899
+        :param alpha: alpha
+        :param shuffle_p: chance of triggering the augmentation
+        :param class_num: number of classification categories
+        :param batch_size: batch_size of training
+        :param device: CUDA or CPU
+        """
+        self.alpha = alpha
+        self.class_num = class_num
+        self.batch_size = batch_size
+
+        # calibrate the trigger chance p: the new ratio is the chance of the operation occurring in each batch
+        self.p = shuffle_p * (perm(self.batch_size, self.batch_size)
+                              / (perm(self.batch_size, self.batch_size) -
+                                 perm(self.batch_size - 1, self.batch_size - 1)))
+        self.device = torch.device(device)
+
+    def __call__(self, inputs, labels, act=True):
+
+        labels = torch.eye(self.class_num).to(self.device)[labels, :]  # one-hot hard label
+        ori_inputs = inputs.clone().to(self.device)  # duplicate inputs for ori inputs
+        cutmix_inputs = inputs.clone().to(self.device)  # duplicate inputs for outputs
+        lam_list = []  # a list to record the operating ratio
+        indices = torch.randperm(self.batch_size, device=self.device)  # shuffle indices
+        shuffled_inputs = inputs[indices].to(self.device)
+        shuffled_labels = labels[indices].to(self.device)
+
+        for i in range(self.batch_size):
+
+            if np.random.randint(0, 101) > 100 * self.p or (not act):
+                # skip the augmentation operation for this sample
+                lam_list.append(-1)
+                continue
+
+            lam = np.random.beta(self.alpha, self.alpha)
+            bbx1, bby1, bbx2, bby2 = rand_bbox(ori_inputs.size(), lam)  # get random bbox
+
+            cutmix_inputs[i, :, bbx1:bbx2, bby1:bby2] = \
+                shuffled_inputs[i, :, bbx1:bbx2, bby1:bby2]
+
+            # update the ratio of (area of ori_image on the new image) for the soft label
+            lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (ori_inputs.size()[2] * ori_inputs.size()[3]))
+            lam_list.append(lam)
+            labels[i] = labels[i] * lam + shuffled_labels[i] * (1 - lam)
+
+        long_label = labels.argmax(dim=1)
+        return cutmix_inputs, labels, long_label
+
+class Mixup(object):
+    def __init__(self, alpha=2, shuffle_p=1.0, class_num=2, batch_size=4, device='cpu'):
+        """
+        Mixup augmentation  arXiv:1710.09412
+        :param alpha: alpha
+        :param shuffle_p: chance of triggering the augmentation
+        :param class_num: number of classification categories
+        :param batch_size: batch_size of training
+        :param device: CUDA or CPU
+        """
+        self.alpha = alpha
+        self.class_num = class_num
+        self.batch_size = batch_size
+        # calibrate the trigger chance p: the new ratio is the chance of the operation occurring in each batch
+        self.p = shuffle_p * (perm(self.batch_size, self.batch_size)
+                              / (perm(self.batch_size, self.batch_size) -
+                                 perm(self.batch_size - 1, self.batch_size - 1)))
+        self.device = torch.device(device)
+
+    def __call__(self, inputs, labels, act=True):
+        labels = torch.eye(self.class_num).to(self.device)[labels, :]  # one-hot hard label
+        ori_inputs = inputs.clone().to(self.device)  # duplicate inputs for ori inputs
+        mixup_inputs = inputs.clone().to(self.device)  # duplicate inputs for outputs
+        lam_list = []  # a list to record the operating ratio
+        indices = torch.randperm(self.batch_size, device=self.device)  # shuffle indices
+        shuffled_inputs = inputs[indices].to(self.device)
+        shuffled_labels = labels[indices].to(self.device)
+
+        for i in range(self.batch_size):
+            if np.random.randint(0, 101) > 100 * self.p or (not act):
+                # skip the augmentation operation for this sample
+                lam_list.append(-1)
+                continue
+
+            lam = np.random.beta(self.alpha, self.alpha)
+            lam_list.append(lam)
+            mixup_inputs[i] = ori_inputs[i] * lam + shuffled_inputs[i] * (1 - lam)
+            labels[i] = labels[i] * lam + shuffled_labels[i] * (1 - lam)
+
+        long_label = labels.argmax(dim=1)
+        return mixup_inputs, labels, long_label
+
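The soft-label arithmetic used by Mixup (and the other mixes) is just a convex combination of one-hot labels; a minimal numeric sketch with hypothetical values:

```python
import torch

lam = 0.7
label_a = torch.tensor([1.0, 0.0])  # one-hot class 0
label_b = torch.tensor([0.0, 1.0])  # one-hot class 1

soft = label_a * lam + label_b * (1 - lam)
print(soft)           # tensor([0.7000, 0.3000])
print(soft.argmax())  # tensor(0) -> the hard "long" label
```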
+class SaliencyMix(object):
+    def __init__(self, alpha=1, shuffle_p=1.0, class_num=2, batch_size=4, device='cpu'):
+        """
+        SaliencyMix augmentation  arXiv:2006.01791
+        :param alpha: alpha
+        :param shuffle_p: chance of triggering the augmentation
+        :param class_num: number of classification categories
+        :param batch_size: batch_size of training
+        :param device: CUDA or CPU
+        """
+        # original implementation used batch_size=128
+        self.alpha = alpha
+        self.class_num = class_num
+        self.batch_size = batch_size
+        # here p is used directly as the per-sample trigger chance (no batch-size calibration)
+        self.p = shuffle_p
+        self.device = torch.device(device)
+
+    def __call__(self, inputs, labels, act=True):
+        labels = torch.eye(self.class_num).to(self.device)[labels, :]  # one-hot hard label
+        ori_inputs = inputs.clone().to(self.device)  # duplicate inputs for ori inputs
+        saliencymix_inputs = inputs.clone().to(self.device)  # duplicate inputs for outputs
+        lam_list = []  # a list to record the operating ratio
+        indices = torch.randperm(self.batch_size, device=self.device)  # shuffle indices
+        shuffled_inputs = inputs[indices].to(self.device)
+        shuffled_labels = labels[indices].to(self.device)
+
+        for i in range(self.batch_size):
+            if np.random.randint(0, 101) > 100 * self.p or (not act) or self.alpha <= 0:
+                # skip the augmentation operation for this sample
+                lam_list.append(-1)
+                continue
+
+            lam = np.random.beta(self.alpha, self.alpha)
+            bbx1, bby1, bbx2, bby2 = saliency_bbox(shuffled_inputs[i], lam)  # get a saliency-guided bbox
+
+            saliencymix_inputs[i, :, bbx1:bbx2, bby1:bby2] = \
+                shuffled_inputs[i, :, bbx1:bbx2, bby1:bby2]
+
+            # update the ratio of (area of ori_image on the new image) for the soft label
+            lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (ori_inputs.size()[2] * ori_inputs.size()[3]))
+            lam_list.append(lam)
+            labels[i] = labels[i] * lam + shuffled_labels[i] * (1 - lam)
+
+        long_label = labels.argmax(dim=1)
+        return saliencymix_inputs, labels, long_label
+
+
+class ResizeMix(object):
+    def __init__(self, shuffle_p=1.0, class_num=2, batch_size=4, device='cpu'):
+        """
+        ResizeMix augmentation  arXiv:2012.11101
+        :param shuffle_p: chance of triggering the augmentation
+        :param class_num: number of classification categories
+        :param batch_size: batch_size of training
+        :param device: CUDA or CPU
+        """
+        # original implementation used batch_size=512
+        self.class_num = class_num
+        self.batch_size = batch_size
+        # here p is used directly as the per-sample trigger chance (no batch-size calibration)
+        self.p = shuffle_p
+        self.device = torch.device(device)
+
+    def __call__(self, inputs, labels, alpha=0.1, beta=0.8, act=True):
+        labels = torch.eye(self.class_num).to(self.device)[labels, :]  # one-hot hard label
+        ori_inputs = inputs.clone().to(self.device)  # duplicate inputs for ori inputs
+        resizemix_inputs = inputs.clone().to(self.device)  # duplicate inputs for outputs
+        lam_list = []  # a list to record the operating ratio
+        indices = torch.randperm(self.batch_size, device=self.device)  # shuffle indices
+        shuffled_inputs = inputs[indices].to(self.device)
+        shuffled_labels = labels[indices].to(self.device)
+
+        for i in range(self.batch_size):
+            if np.random.randint(0, 101) > 100 * self.p or (not act):
+                # skip the augmentation operation for this sample
+                lam_list.append(-1)
+                continue
+
+            lam = np.random.uniform(alpha, beta)
+            # lam = 1 - lam
+            bbx1, bby1, bbx2, bby2 = rand_bbox(ori_inputs.size(), lam)  # get random bbox
+
+            # resizer from torchvision
+            torch_resize = Resize([bbx2 - bbx1, bby2 - bby1])
+
+            # Tensor -> PIL -> resize -> Tensor
+            re_pil_image = torch_resize(ToPILImage()(shuffled_inputs[i]))
+            resizemix_inputs[i, :, bbx1:bbx2, bby1:bby2] = ToTensor()(re_pil_image)
+
+            # update the ratio of (area of ori_image on the new image) for the soft label
+            lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (ori_inputs.size()[2] * ori_inputs.size()[3]))
+            lam_list.append(lam)
+            labels[i] = labels[i] * lam + shuffled_labels[i] * (1 - lam)
+
+        long_label = labels.argmax(dim=1)
+        return resizemix_inputs, labels, long_label
+
+class FMix(FMixBase):
+
+    def __init__(self, shuffle_p=1.0, class_num=2, batch_size=4, decay_power=3, alpha=1, size=(32, 32),
+                 max_soft=0.0, reformulate=False, device='cpu'):
+        """
+        FMix augmentation  arXiv:2002.12047
+        :param shuffle_p: chance of triggering the augmentation
+        :param class_num: number of classification categories
+        :param batch_size: batch_size of training
+
+        :param decay_power: decay power for the frequency decay, prop 1/f**d
+        :param alpha: alpha of the beta distribution for the mask mean
+        :param size: shape of the sampled mask
+        :param max_soft: softening value between 0 and 0.5
+        :param reformulate: if True, uses the reformulated FMix
+
+        :param device: CUDA or CPU
+        """
+        # original implementation used batch_size=128
+        super().__init__(decay_power, alpha, size, max_soft, reformulate)
+        self.class_num = class_num
+        self.batch_size = batch_size
+        self.p = shuffle_p
+        self.device = torch.device(device)
+
+    def __call__(self, inputs, labels, alpha=1, act=True):
+        # Sample mask and generate random permutation
+        lam, mask = sample_mask(self.alpha, self.decay_power, self.size, self.max_soft, self.reformulate)
+        mask = torch.from_numpy(mask).float().to(self.device)
+
+        labels = torch.eye(self.class_num).to(self.device)[labels, :]  # one-hot hard label
+        ori_inputs = inputs.clone().to(self.device)
+        fmix_inputs = inputs.clone().to(self.device)  # duplicate inputs for outputs
+        lam_list = []  # a list to record the operating ratio
+        indices = torch.randperm(self.batch_size, device=self.device)  # shuffle indices
+        shuffled_inputs = inputs[indices].to(self.device)
+        shuffled_labels = labels[indices].to(self.device)
+
+        for i in range(self.batch_size):
+            if np.random.randint(0, 101) > 100 * self.p or (not act):
+                # skip the augmentation operation for this sample
+                lam_list.append(-1)
+                continue
+
+            x1 = mask * ori_inputs[i]
+            x2 = (1 - mask) * shuffled_inputs[i]
+            fmix_inputs[i] = x1 + x2
+
+            lam_list.append(lam)
+            labels[i] = labels[i] * lam + shuffled_labels[i] * (1 - lam)
+
+        long_label = labels.argmax(dim=1)
+        # print('lam:', lam)
+        return fmix_inputs, labels, long_label
+
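CellMix below works on patch sequences; `patchify`/`unpatchify` (imported from `utils.visual_usage`) are assumed here to follow the usual MAE-style `[B, 3, H, W] <-> [B, N, p*p*3]` reshaping. A self-contained sketch of that round trip, under that assumption and for square images divisible by the patch size:

```python
import torch

def patchify_sketch(imgs, p):
    # [B, 3, H, W] -> [B, (H//p)*(W//p), p*p*3]
    B, C, H, W = imgs.shape
    h, w = H // p, W // p
    x = imgs.reshape(B, C, h, p, w, p)
    x = torch.einsum('nchpwq->nhwpqc', x)
    return x.reshape(B, h * w, p * p * C)

def unpatchify_sketch(x, p, H, W):
    # inverse of patchify_sketch
    B = x.shape[0]
    h, w = H // p, W // p
    x = x.reshape(B, h, w, p, p, 3)
    x = torch.einsum('nhwpqc->nchpwq', x)
    return x.reshape(B, 3, H, W)

imgs = torch.randn(2, 3, 224, 224)
seq = patchify_sketch(imgs, 32)  # [2, 49, 3072]
assert torch.allclose(unpatchify_sketch(seq, 32, 224, 224), imgs)
```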
+# CellMix
+class CellMix(object):
+    def __init__(self, shuffle_p=1.0, class_num=2, strategy='In-place', group_shuffle_size=-1, device='cpu'):
+        """
+        CellMix augmentation  arXiv:2301.11513
+        :param shuffle_p: chance of triggering the augmentation
+        :param class_num: number of classification categories
+        :param strategy: 'In-place' or 'Random' to shuffle the relation patches within the batch
+        :param group_shuffle_size: the size of the shuffling group in the batch, -1 for the whole batch
+        :param device: CUDA or CPU
+        """
+        self.p = shuffle_p
+        self.CLS = class_num  # classification category number of the task
+        self.device = device
+        self.strategy = strategy  # 'In-place' or 'Random'
+        self.group_shuffle_size = group_shuffle_size  # -1 for whole batch
+
+    def __call__(self, inputs, labels, fix_position_ratio=0.5, puzzle_patch_size=32, act=True):
+        """
+        Fix-position in-place shuffling.
+        Performs a cross-sample random selection that fixes some patches in each image of the batch.
+        After selection, the fixed patches are reserved; the remaining patches are batch-wise
+        in-place shuffled and then regrouped with the fixed patches.
+        Cross-sample selection is done by argsorting random noise in dim 1, applied to all images in the batch.
+        The in-place batch-wise shuffle is done by argsorting random noise in dim 0.
+        The grouped in-place batch-wise shuffle is done by argsorting random noise in the batch dimension, per group.
+
+        :param inputs: input image tensor, size of [B, 3, H, W]
+        :param labels: long-label tensor of size [B]
+        :param fix_position_ratio: float ratio of patches that stay fixed in place
+        :param puzzle_patch_size: int patch size of the shuffle
+        :param act: set to False to force not triggering CellMix in validation, set to True to trigger by chance p
+
+        output: x, soft_label, long_label
+        x : [B, 3, H, W] re-grouped image after the CellMix augmentation
+        soft_label : [B, CLS], soft label of the class distribution
+        long_label : [B] hard long label for general description
+        """
+        if np.random.randint(0, 101) > 100 * self.p or (not act):
+            soft_label = torch.eye(self.CLS).to(self.device)[labels, :]  # one-hot hard label
+            return inputs, soft_label, labels
+
+        # Break the images into puzzle patches of size puzzle_patch_size  [B, num_patches, D]
+        inputs = patchify(inputs, puzzle_patch_size)
+        B, num_patches, D = inputs.shape
+
+        # generate the pseudo-mask: in the cls dim, only the dim of the labelled class is set to 1
+        mask = torch.zeros([B, num_patches, self.CLS], device=inputs.device, requires_grad=False)  # no grad
+        # mask of patches: (B, num_patches, cls) with (cls)=[0, mask_area, 0, ....]
+
+        # transform to the pseudo-mask
+        B_idx = range(B)
+        mask[B_idx, :, labels] = 1
+
+        # number of fix-position puzzle patches
+        len_fix_position = int(num_patches * fix_position_ratio)
+
+        # create a noise tensor to prepare the shuffle idx of puzzle patches
+        noise = torch.rand(1, num_patches, device=self.device)
+        noise = torch.repeat_interleave(noise, repeats=B, dim=0)
+
+        # sorting the noise tensor yields an index matrix matching the batch sequence's shape
+        ids_shuffle = torch.argsort(noise, dim=1)  # ascending: small is keep, large is remove
+        # sorting the idx matrix again recovers the original location idx matrix before assignment
+        ids_restore = torch.argsort(ids_shuffle, dim=1)
+
+        # keep the first subset
+        ids_fix = ids_shuffle[:, :len_fix_position]  # [B,num_patches] -> [B,fix_patches]
+        ids_puzzle = ids_shuffle[:, len_fix_position:]  # [B,num_patches] -> [B,puzzle_patches]
+
+        # set puzzle patch
+        # ids_?.unsqueeze(-1).repeat(1, 1, D)
+        # [B,?_patches] -> [B,?_patches,1] (at each place with the idx of ori patch) -> [B,?_patches,D]
+
+        # torch.gather to select patch groups x_fixed of [B,fix_patches,D] and x_puzzle of [B,puzzle_patches,D]
+        x_fixed = torch.gather(inputs, dim=1, index=ids_fix.unsqueeze(-1).repeat(1, 1, D))
+        x_puzzle = torch.gather(inputs, dim=1, index=ids_puzzle.unsqueeze(-1).repeat(1, 1, D))
+        mask_fixed = torch.gather(mask, dim=1, index=ids_fix.unsqueeze(-1).repeat(1, 1, self.CLS))
+        mask_puzzle = torch.gather(mask, dim=1, index=ids_puzzle.unsqueeze(-1).repeat(1, 1, self.CLS))
+
+        if self.strategy == 'In-place' or self.strategy == 'Random':
+            # the In-place strategy shuffles the relation patches within their location, across the batch index
+            B, num_shuffle_patches, D = x_puzzle.shape
+
+            # create a noise tensor to prepare the shuffle idx of puzzle patches
+            # [B, num_shuffle_patches] noise in [0, 1]
+            noise = torch.rand(B, num_shuffle_patches, device=self.device)
+
+            if self.group_shuffle_size == -1 or self.group_shuffle_size == B:  # CellMix-Split
+                # sort the noise matrix to obtain an index assignment for the shuffle,
+                # shuffling dim 0 of the entire noise (across the whole batch)
+                in_place_shuffle_indices = torch.argsort(noise, dim=0)
+
+            else:  # CellMix-Group
+                assert B > self.group_shuffle_size > 0 and B % self.group_shuffle_size == 0
+                grouped_indices_list = []
+                for group_idx in range(B // self.group_shuffle_size):
+                    # group the noise by self.group_shuffle_size: [group_shuffle_size, N]
+                    grouped_noise = noise[group_idx * self.group_shuffle_size:
+                                          group_idx * self.group_shuffle_size + self.group_shuffle_size, :]
+                    # sort each grouped_noise matrix to obtain an index assignment for the shuffle;
+                    # here the shuffle dim is 0 (across the batch within the group)
+                    grouped_indices = torch.argsort(grouped_noise, dim=0)
+                    # put the grouped indices matrix into the list
+                    grouped_indices_list.append(grouped_indices + self.group_shuffle_size * group_idx)
+                # stack (cat) the group indices from the list back into one tensor
+                in_place_shuffle_indices = torch.cat(grouped_indices_list, dim=0)
+
+            # torch.gather to apply the shuffle (taking all the idx based on the shuffled indices)
+            x_puzzle = torch.gather(x_puzzle, dim=0, index=in_place_shuffle_indices.unsqueeze(-1).repeat(1, 1, D))
+            mask_puzzle = torch.gather(mask_puzzle, dim=0,
+                                       index=in_place_shuffle_indices.unsqueeze(-1).repeat(1, 1, self.CLS))
+        else:
+            print('not a valid CellMix strategy')
+
+        # pack up all puzzle patches
+        inputs = torch.cat([x_fixed, x_puzzle], dim=1)
+        mask = torch.cat([mask_fixed, mask_puzzle], dim=1)
+
+        # unshuffle to restore the fixed positions
+        inputs = torch.gather(inputs, dim=1, index=ids_restore.unsqueeze(-1).repeat(1, 1, D))
+        # torch.gather to generate the restored binary mask
+        mask = torch.gather(mask, dim=1, index=ids_restore.unsqueeze(-1).repeat(1, 1, self.CLS))
+
+        # the CellMix Random strategy additionally shuffles the image patches (after the in-place shuffle)
+        if self.strategy == 'Random':
+            B, num_patches, D = inputs.shape
+            # create a noise tensor to prepare the shuffle idx of puzzle patches
+            noise = torch.rand(B, num_patches, device=self.device)  # [B, num_patches] noise in [0, 1]
+            # sort the noise matrix to obtain an index assignment; here the shuffle dim is 1 (within each sample)
+            all_shuffle_indices = torch.argsort(noise, dim=1)
+            # ids_shuffle has shape [B, N], where N holds the indices
+            # torch.gather to shuffle
+            inputs = torch.gather(inputs, dim=1, index=all_shuffle_indices.unsqueeze(-1).repeat(1, 1, D))
+            # no need to shuffle the mask, because this patch-wise shuffle stays within each sample
+        else:  # when strategy == 'In-place'
+            pass
+
+        # unpatchify to obtain the puzzle images and their mask
+        inputs = unpatchify(inputs, puzzle_patch_size)  # restore to image size: B,3,224,224 / B,3,384,384
+
+        # transform the soft mask into a soft label
+        # calculate a composed label with a conjugate design
+        # [B, num_patches, CLS] -> (B, CLS)
+        soft_label = mask.sum(dim=1)  # (B, CLS)
+        soft_label = soft_label / num_patches
+        # long_label, as a data-augmentation requirement
+        long_label = soft_label.argmax(dim=1)
+
+        return inputs, soft_label, long_label
+
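The argsort-of-random-noise trick used throughout CellMix is worth seeing in isolation; a minimal sketch of an in-place (dim 0) batch shuffle and its restoration, with hypothetical shapes:

```python
import torch

B, N = 4, 6
x = torch.arange(B * N).float().reshape(B, N)

noise = torch.rand(B, N)
ids = torch.argsort(noise, dim=0)             # a per-column permutation across the batch
shuffled = torch.gather(x, dim=0, index=ids)  # position i of every sample is swapped across samples

ids_restore = torch.argsort(ids, dim=0)       # argsort of a permutation yields its inverse
restored = torch.gather(shuffled, dim=0, index=ids_restore)
assert torch.equal(restored, x)
```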
+# ask func
+def get_online_augmentation(augmentation_name, p=0.5, class_num=2, batch_size=4, edge_size=224, device='cpu'):
+    """
+    :param augmentation_name: name of the data-augmentation method
+    :param p: chance of triggering
+    :param class_num: number of classes in the classification task
+    :param batch_size: batch size
+    :param edge_size: edge size of img
+
+    :param device: cpu or cuda
+
+    augmentation_name, class_num, batch_size and edge_size must be provided
+    """
+    if augmentation_name == 'CellMix-Group':
+        Augmentation = CellMix(shuffle_p=p, class_num=class_num, strategy='In-place', group_shuffle_size=2,
+                               device=device)
+        return Augmentation
+
+    elif augmentation_name == 'CellMix-Group4':
+        Augmentation = CellMix(shuffle_p=p, class_num=class_num, strategy='In-place', group_shuffle_size=4,
+                               device=device)
+        return Augmentation
+
+    elif augmentation_name == 'CellMix-Split':
+        Augmentation = CellMix(shuffle_p=p, class_num=class_num, strategy='In-place', group_shuffle_size=-1,
+                               device=device)
+        return Augmentation
+
+    elif augmentation_name == 'CellMix-Random':
+        Augmentation = CellMix(shuffle_p=p, class_num=class_num, strategy='Random', group_shuffle_size=2,
+                               device=device)
+        return Augmentation
+
+    elif augmentation_name == 'CellMix-Random4':
+        Augmentation = CellMix(shuffle_p=p, class_num=class_num, strategy='Random', group_shuffle_size=4,
+                               device=device)
+        return Augmentation
+
+    elif augmentation_name == 'CellMix-Self':
+        Augmentation = CellMix(shuffle_p=p, class_num=class_num, strategy='Random', group_shuffle_size=1,
+                               device=device)
+        return Augmentation
+
+    elif augmentation_name == 'CellMix-All':
+        Augmentation = CellMix(shuffle_p=p, class_num=class_num, strategy='Random', group_shuffle_size=-1,
+                               device=device)
+        return Augmentation
+
+    elif augmentation_name == 'Cutout':
+        Augmentation = Cutout(alpha=2, shuffle_p=p, class_num=class_num, batch_size=batch_size, device=device)
+        return Augmentation
+
+    elif augmentation_name == 'CutMix':
+        Augmentation = CutMix(alpha=2, shuffle_p=p, class_num=class_num, batch_size=batch_size, device=device)
+        return Augmentation
+
+    elif augmentation_name == 'Mixup':
+        Augmentation = Mixup(alpha=2, shuffle_p=p, class_num=class_num, batch_size=batch_size, device=device)
+        return Augmentation
+
+    elif augmentation_name == 'SaliencyMix':
+        Augmentation = SaliencyMix(alpha=1, shuffle_p=p, class_num=class_num, batch_size=batch_size,
+                                   device=device)  # alpha here corresponds to beta in the original implementation
+        return Augmentation
+
+    elif augmentation_name == 'ResizeMix':
+        Augmentation = ResizeMix(shuffle_p=p, class_num=class_num, batch_size=batch_size, device=device)
+        return Augmentation
+
+    elif augmentation_name == 'FMix':
+        # FMix uses p=1.0 because the chance of triggering is determined inside its own design
+        Augmentation = FMix(shuffle_p=1.0, class_num=class_num, batch_size=batch_size,
+                            size=(edge_size, edge_size), device=device)
+        return Augmentation
+
+    elif augmentation_name == 'PuzzleMix':
+        return None
+        # fixme: all related parts have been taken out separately
+        # Augmentation = PuzzleMix(alpha=2, shuffle_p=p, class_num=class_num, batch_size=batch_size, device=device)
+        # return Augmentation
+
+    elif augmentation_name == 'CoMix':
+        # TODO CoMix
+        return None
+
+    elif augmentation_name == 'RandomMix':
+        # TODO RandomMix
+        return None
+
+    else:
+        print('no valid counterpart augmentation selected')
+        return None
+
+
+if __name__ == '__main__':
+    '''
+    Augmentation = get_online_augmentation('CellMix-Split', p=0.5, class_num=2)
+    output, labels, GT_labels = Augmentation(x, label, fix_position_ratio=0.5, puzzle_patch_size=32, act=True)
+
+    print(labels, GT_labels)
+
+    '''
+
+    x = torch.load("./temp-tensors/warwick.pt")
+    # print(x.shape)
+    label = torch.load("./temp-tensors/warwick_labels.pt")
+    # print(label)
+
+    # Augmentation = get_online_augmentation('ResizeMix', p=0.5, class_num=2)
+    # output, labels, GT_labels = Augmentation(x, label, act=True)
+    Augmentation = get_online_augmentation('CellMix-Group', p=1, class_num=2)
+    output, labels, GT_labels = Augmentation(x, label, fix_position_ratio=0.5, puzzle_patch_size=32, act=True)
+
+    print(labels, GT_labels)
+
+    composed_img = ToPILImage()(output[0])
+    composed_img.show()
+    composed_img = ToPILImage()(output[1])
+    composed_img.show()
+    composed_img = ToPILImage()(output[2])
+    composed_img.show()
+    composed_img = ToPILImage()(output[3])
+    composed_img.show()
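For context, a hedged sketch of how these callables slot into a training step; the model, optimizer and soft-label cross-entropy here are placeholders, not part of the repo:

```python
import torch.nn.functional as F
from utils.online_augmentations import get_online_augmentation

aug = get_online_augmentation('CutMix', p=0.5, class_num=2, batch_size=4, edge_size=224)

def train_step(model, imgs, labels, optimizer):
    # inputs are mixed and labels become soft class distributions
    mixed, soft_labels, long_labels = aug(imgs, labels, act=True)
    logits = model(mixed)
    # soft-label cross-entropy; long_labels remain available for accuracy logging
    loss = -(soft_labels * F.log_softmax(logits, dim=1)).sum(dim=1).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item(), long_labels
```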
diff --git a/PuzzleTuning/utils/sam.py b/PuzzleTuning/utils/sam.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b44aad2787d00d56905e88b68c8a93c5f37440a
--- /dev/null
+++ b/PuzzleTuning/utils/sam.py
@@ -0,0 +1,108 @@
+"""
+Version: Aug 26th 17:00
+SAM: an optimizer wrapper for better generalisation that helps avoid overfitting, ICLR 2021 spotlight paper by Google
+
+Introduction (in Chinese): https://mp.weixin.qq.com/s/04VT-ldd0-XEkhEW6Txl_A
+Third-party implementation from: https://pub.towardsai.net/we-dont-need-to-worry-about-overfitting-anymore-9fb31a154c81
+
+Paper: Sharpness-aware Minimization for Efficiently Improving Generalization
+Link: https://arxiv.org/abs/2010.01412
+
+How it works:
+when the optimizer updates the model parameters w, each update can be split into four steps:
+
+(1) compute the gradient G of the batch data S at the parameters w.
+
+(2) solve the dual norm of G and update the parameters along the dual-vector direction,
+    obtaining an auxiliary model at w + e(w).
+
+(3) with the auxiliary model at w + e(w), compute the gradient G' on S.
+
+(4) update the original model parameters w with G'.
+
+
+Usage example:
+from sam import SAM
+...
+model = YourModel()
+base_optimizer = torch.optim.SGD  # pass in an optimizer template
+optimizer = SAM(model.parameters(), base_optimizer, lr=0.1, momentum=0.9)  # optimizer settings
+...
+for input, output in data:
+
+    # first forward-backward pass: compute the first loss, just like ordinary training.
+    # the first loss comes from the real model and is the one to report in training statistics;
+    # the second loss belongs to the auxiliary model, so it is not used in the usual loss statistics.
+    output = model(input)
+    loss = loss_function(output, labels)  # use this loss for any training statistics!!!!
+    loss.backward()  # backward pass of the model, recording the original gradient.
+
+    # first_step computes the "SAM gradient"
+    optimizer.first_step(zero_grad=True)  # the first step updates the original parameters with the "SAM gradient",
+    # turning the model into the auxiliary model; it also records how to return to the original parameters.
+
+    # second forward-backward pass: forward and backward the auxiliary model (the model after first_step)
+    output2 = model(input)  # use output2 so the graph belongs to the auxiliary model, otherwise many bugs appear.
+
+    # an additional computation graph is created, so compute time and GPU memory usage both increase.
+
+    loss_function(output2, labels).backward()  # make sure to do a full forward pass; backward the auxiliary model
+    optimizer.second_step(zero_grad=True)  # the second step first restores the original parameters, then the base
+    # optimizer updates the original parameter set along the auxiliary model's update direction.
+...
+
+
+"""
+import torch
+
+
+class SAM(torch.optim.Optimizer):
+    def __init__(self, params, base_optimizer, rho=0.05, **kwargs):
+        assert rho >= 0.0, f"Invalid rho, should be non-negative: {rho}"
+
+        defaults = dict(rho=rho, **kwargs)
+        super(SAM, self).__init__(params, defaults)
+
+        self.base_optimizer = base_optimizer(self.param_groups, **kwargs)
+        self.param_groups = self.base_optimizer.param_groups
+
+    @torch.no_grad()
+    def first_step(self, zero_grad=False):  # step 1: build the auxiliary model by modifying the original
+        # parameters in place, while recording the change so it can be undone later
+        grad_norm = self._grad_norm()
+
+        for group in self.param_groups:
+            scale = group["rho"] / (grad_norm + 1e-12)  # influence of the nearby gradient
+
+            for p in group["params"]:
+                if p.grad is None:
+                    continue
+                e_w = p.grad * scale.to(p)  # the "SAM gradient": the shift that defines the auxiliary model
+                p.add_(e_w)  # climb to the local maximum "w + e(w)"; this update is in-place,
+                # so the original model becomes the auxiliary model
+                self.state[p]["e_w"] = e_w
+
+        if zero_grad:
+            self.zero_grad()
+
+    @torch.no_grad()
+    def second_step(self, zero_grad=False):  # step 2: first change the auxiliary parameters back to the original,
+        # then let base_optimizer update the original model using the gradient taken at the auxiliary model
+
+        for group in self.param_groups:
+
+            for p in group["params"]:
+                if p.grad is None:
+                    continue
+                p.sub_(self.state[p]["e_w"])  # get back to "w" from "w + e(w)"
+                # restore the auxiliary parameters to the original model; note this is also in-place!!
+
+        self.base_optimizer.step()  # do the actual "sharpness-aware" update with the base optimizer
+
+        if zero_grad:
+            self.zero_grad()
+
+    def _grad_norm(self):
+        shared_device = self.param_groups[0]["params"][0].device
+        # put everything on the same device, in case of model parallelism
+        norm = torch.norm(
+            torch.stack([
+                p.grad.norm(p=2).to(shared_device)
+                for group in self.param_groups for p in group["params"]
+                if p.grad is not None]), p=2)
+        return norm
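A runnable toy version of the two-step loop described in the docstring; the quadratic objective and tensor size are arbitrary:

```python
import torch
from utils.sam import SAM

w = torch.nn.Parameter(torch.randn(10))
optimizer = SAM([w], torch.optim.SGD, rho=0.05, lr=0.1, momentum=0.9)

for _ in range(5):
    loss = (w ** 2).sum()          # first pass on the real parameters
    loss.backward()
    optimizer.first_step(zero_grad=True)

    (w ** 2).sum().backward()      # second pass at the perturbed point w + e(w)
    optimizer.second_step(zero_grad=True)
    print(loss.item())             # report the first-pass loss, as the docstring advises
```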
diff --git a/PuzzleTuning/utils/schedulers.py b/PuzzleTuning/utils/schedulers.py
new file mode 100644
index 0000000000000000000000000000000000000000..1132d41f23de04d9127941a79e8ea453d06ba508
--- /dev/null
+++ b/PuzzleTuning/utils/schedulers.py
@@ -0,0 +1,285 @@
+"""
+Schedulers Script  ver: Feb 15th 17:00
+
+puzzle_patch_scheduler is used to arrange the patch size for multi-scale learning
+
+ref
+lr_scheduler from the MAE code.
+https://github.com/facebookresearch/mae
+"""
+
+import math
+import random
+
+
+def factor(num):
+    """
+    find the factors of the input num
+    """
+    factors = []
+    for_times = int(math.sqrt(num))
+    for i in range(for_times + 1)[1:]:
+        if num % i == 0:
+            factors.append(i)
+            t = int(num / i)
+            if not t == i:
+                factors.append(t)
+    return factors
+
+
+def defactor(num_list, basic_num):  # keep only the multiples of basic_num
+    array = []
+    for i in num_list:
+        if i // basic_num * basic_num - i == 0:
+            array.append(i)
+    array.sort()  # ascending
+    return array
+
+
+def adjust_learning_rate(optimizer, epoch, args):
+    """
+    Decay the learning rate with half-cycle cosine after warmup.
+    epoch can be a float, to be more flexible,
+    like: data_iter_step / len(data_loader) + epoch
+    """
+    # calculate the lr for this step
+    if epoch < args.warmup_epochs:  # warmup
+        lr = args.lr * epoch / args.warmup_epochs  # lr increases from zero to the configured lr
+
+    else:  # after warmup, do cosine lr decay
+        lr = args.min_lr + (args.lr - args.min_lr) * 0.5 * \
+            (1. + math.cos(math.pi * (epoch - args.warmup_epochs) / (args.epochs - args.warmup_epochs)))
+
+    # update lr in the optimizer
+    for param_group in optimizer.param_groups:
+        if "lr_scale" in param_group:
+            param_group["lr"] = lr * param_group["lr_scale"]
+        else:
+            param_group["lr"] = lr
+    return lr
+
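A quick check of the warmup-plus-cosine curve, driving adjust_learning_rate with a namespace of the hyperparameters it expects (the values are illustrative):

```python
import argparse
import torch

from utils.schedulers import adjust_learning_rate

args = argparse.Namespace(lr=1e-3, min_lr=1e-6, warmup_epochs=5, epochs=50)
opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=args.lr)

for epoch in [0, 2.5, 5, 27.5, 50]:
    lr = adjust_learning_rate(opt, epoch, args)
    print(epoch, round(lr, 8))
# 0 -> 0.0, 2.5 -> 0.0005 (linear warmup), 5 -> 0.001 (peak), 27.5 -> ~0.0005, 50 -> 1e-06
```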
+class patch_scheduler:
+    """
+    this is used to drive the puzzle patch size by loss and epoch;
+    the patch list is built automatically
+    """
+
+    def __init__(self, total_epoches=200, warmup_epochs=20, edge_size=384, basic_patch=16, strategy=None,
+                 threshold=3.0, reducing_factor=0.933, fix_patch_size=None, patch_size_jump=None):
+        super().__init__()
+
+        self.strategy = strategy
+
+        self.total_epoches = total_epoches
+        self.warmup_epochs = warmup_epochs
+
+        # automatically build the legal patch list, from small to big size
+        self.patch_list = defactor(factor(edge_size), basic_patch)
+
+        self.threshold = threshold
+        self.reducing_factor = reducing_factor
+        self.fix_patch_size = fix_patch_size
+
+        # from small to big patch; at the whole-figure level no patch is needed, so drop the largest
+        if len(self.patch_list) > 1:
+            self.patch_list = self.patch_list[:-1]
+
+        # jump_patch_list by selecting the 'odd' or 'even' entries, both keeping the smallest patch size
+        if patch_size_jump == 'odd':  # 384: [192, 96, 48, 16]
+            jump_patch_list = self.patch_list[0::2]
+            self.patch_list = jump_patch_list
+        elif patch_size_jump == 'even':  # 384: [128, 64, 32, 16]
+            jump_patch_list = self.patch_list[1::2]
+            # add back the smallest
+            temp_list = [self.patch_list[0]]
+            temp_list.extend(jump_patch_list)
+            self.patch_list = temp_list
+        else:  # all
+            pass
+
+        if self.strategy in ['reverse', 'loss_back', 'loss_hold']:  # start from big (easy) to small (complex)
+            self.patch_list.sort(reverse=True)
+
+        if self.strategy is None or self.strategy == 'fixed':
+            puzzle_patch_size = self.fix_patch_size or self.patch_list[0]
+            print('patch_list:', puzzle_patch_size)
+        else:
+            print('patch_list:', self.patch_list)
+
+        # a loss log could be kept here as a future extension
+ + def __call__(self, epoch, loss=0.0): + # Designed for flexable ablations + if self.strategy == 'linear' or self.strategy == 'reverse': # reverse from big size to small + if epoch < self.warmup_epochs: # warmup + puzzle_patch_size = 32 # fixed size for warmup + else: + puzzle_patch_size = self.patch_list[min(int((epoch - self.warmup_epochs) + / (self.total_epoches - self.warmup_epochs) + * len(self.patch_list)), len(self.patch_list) - 1)] + + elif self.strategy == 'loop': + # looply change the patch size, after [group_size] epoches we change once + group_size = int(self.threshold) + + if epoch < self.warmup_epochs: + puzzle_patch_size = 32 # in warm up epoches, fixed patch size at 32 fixme exploring + else: + group_idx = (epoch - self.warmup_epochs) % (len(self.patch_list) * group_size) + puzzle_patch_size = self.patch_list[int(group_idx / group_size)] + + elif self.strategy == 'random': # random size strategy + puzzle_patch_size = random.choice(self.patch_list) + + elif self.strategy == 'loss_back': + if epoch < self.warmup_epochs: # for warmup + puzzle_patch_size = 32 # in warm-up we use the fix size + else: + if loss == 0.0: + puzzle_patch_size = self.patch_list[min(int((epoch - self.warmup_epochs) + / (self.total_epoches - self.warmup_epochs) + * len(self.patch_list)), len(self.patch_list) - 1)] + + elif loss < self.threshold: + puzzle_patch_size = self.patch_list[min(max(int((epoch - self.warmup_epochs) + / (self.total_epoches - self.warmup_epochs) + * len(self.patch_list)) + 1, 0), + len(self.patch_list) - 1)] + self.threshold *= self.reducing_factor + else: + puzzle_patch_size = self.patch_list[min(max(int((epoch - self.warmup_epochs) + / (self.total_epoches - self.warmup_epochs) + * len(self.patch_list)) - 1, 0), + len(self.patch_list) - 1)] + + elif self.strategy == 'loss_hold': + if epoch < self.warmup_epochs: # for warmup + puzzle_patch_size = 32 # in warm-up we use the fix size + else: + if loss == 0.0: + puzzle_patch_size = self.patch_list[min(int((epoch - self.warmup_epochs) + / (self.total_epoches - self.warmup_epochs) + * len(self.patch_list)), len(self.patch_list) - 1)] + + elif loss < self.threshold: + puzzle_patch_size = self.patch_list[min(max(int((epoch - self.warmup_epochs) + / (self.total_epoches - self.warmup_epochs) + * len(self.patch_list)) + 1, 0), + len(self.patch_list) - 1)] + self.threshold *= self.reducing_factor + else: + puzzle_patch_size = self.patch_list[min(max(int((epoch - self.warmup_epochs) + / (self.total_epoches - self.warmup_epochs) + * len(self.patch_list)), 0), + len(self.patch_list) - 1)] + + else: + # if self.strategy is None or 'fixed' or 'ratio-decay' + puzzle_patch_size = self.fix_patch_size or self.patch_list[0] # basic_patch + + return puzzle_patch_size + + +class ratio_scheduler: + """ + this is used to drive the fix position ratio by loss and epoch + the ratio is control by ratio_floor_factor=0.5, upper_limit=0.9, lower_limit=0.2 + """ + def __init__(self, total_epoches=200, warmup_epochs=20, basic_ratio=0.25, strategy=None, fix_position_ratio=None, + threshold=4.0, loss_reducing_factor=0.933, ratio_floor_factor=0.5, upper_limit=0.9, lower_limit=0.2): + + # fixme basic_ratio and fix_position_ratio(when stage is fixed) is a bit conflicting, not good enough + super().__init__() + self.strategy = strategy + + self.total_epoches = total_epoches + self.warmup_epochs = warmup_epochs + + self.basic_ratio = basic_ratio + + self.threshold = threshold + self.loss_reducing_factor = loss_reducing_factor + + self.fix_position_ratio = 
fix_position_ratio + + self.upper_limit = upper_limit + self.lower_limit = lower_limit + self.ratio_floor_factor = ratio_floor_factor + + def __call__(self, epoch, loss=0.0): + if self.strategy == 'ratio-decay' or self.strategy == 'decay': + if epoch < self.warmup_epochs: # for warmup + fix_position_ratio = self.basic_ratio # fixed + else: + max_ratio = min(3 * self.basic_ratio, self.upper_limit) # upper-limit of 0.9 + min_ratio = max(self.basic_ratio * self.ratio_floor_factor, self.lower_limit) + + fix_position_ratio = min(max(((self.total_epoches - self.warmup_epochs) + - (epoch - self.warmup_epochs)) / + (self.total_epoches - self.warmup_epochs) + * max_ratio, min_ratio), max_ratio) + + elif self.strategy == 'loss_back': + + if epoch < self.warmup_epochs: # for warmup + fix_position_ratio = self.basic_ratio # in warm-up we use the fix ratio + + else: + max_ratio = min(3 * self.basic_ratio, self.upper_limit) + min_ratio = max(self.basic_ratio * self.ratio_floor_factor, self.lower_limit) + if loss == 0.0: + fix_position_ratio = min(max(((self.total_epoches - self.warmup_epochs) + - (epoch - self.warmup_epochs)) / + (self.total_epoches - self.warmup_epochs) + * max_ratio, min_ratio), max_ratio) + elif loss < self.threshold: + fix_position_ratio = min(max(((self.total_epoches - self.warmup_epochs) + - (epoch - self.warmup_epochs)) / + (self.total_epoches - self.warmup_epochs) + * max_ratio * 0.9, min_ratio), max_ratio) + self.threshold *= self.loss_reducing_factor + else: + fix_position_ratio = min(max(((self.total_epoches - self.warmup_epochs) + - (epoch - self.warmup_epochs)) / + (self.total_epoches - self.warmup_epochs) + * max_ratio * 1.1, min_ratio), max_ratio) + + elif self.strategy == 'loss_hold': + + if epoch < self.warmup_epochs: # for warmup + fix_position_ratio = self.basic_ratio # in warm-up we use the fix ratio + + else: + max_ratio = min(3 * self.basic_ratio, self.upper_limit) + min_ratio = max(self.basic_ratio * self.ratio_floor_factor, self.lower_limit) + + if loss == 0.0: + fix_position_ratio = min(max(((self.total_epoches - self.warmup_epochs) + - (epoch - self.warmup_epochs)) / + (self.total_epoches - self.warmup_epochs) + * max_ratio, min_ratio), max_ratio) + elif loss < self.threshold: + fix_position_ratio = min(max(((self.total_epoches - self.warmup_epochs) + - (epoch - self.warmup_epochs)) / + (self.total_epoches - self.warmup_epochs) + * max_ratio * 0.9, min_ratio), max_ratio) + self.threshold *= self.loss_reducing_factor + else: + fix_position_ratio = min(max(((self.total_epoches - self.warmup_epochs) + - (epoch - self.warmup_epochs)) / + (self.total_epoches - self.warmup_epochs) + * max_ratio, min_ratio), max_ratio) + + else: # basic_ratio + fix_position_ratio = self.fix_position_ratio or self.basic_ratio + + return fix_position_ratio + + +''' +scheduler = puzzle_fix_position_ratio_scheduler(strategy='reverse') +epoch = 102 +fix_position_ratio = scheduler(epoch) +print(fix_position_ratio) +''' diff --git a/PuzzleTuning/utils/temp-tensors/color.pt b/PuzzleTuning/utils/temp-tensors/color.pt new file mode 100644 index 0000000000000000000000000000000000000000..058c6edd177162c02e2cf7e802a0c9191b5897b4 --- /dev/null +++ b/PuzzleTuning/utils/temp-tensors/color.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63b58afc3d30f2cead67e4e9d77d0509a9cc547cc0a26390e07204e8f8f9ff0c +size 2409195 diff --git a/PuzzleTuning/utils/temp-tensors/color_labels.pt b/PuzzleTuning/utils/temp-tensors/color_labels.pt new file mode 100644 index 
0000000000000000000000000000000000000000..135a55b90ab671cb650ff50862582cb120d6e69f
--- /dev/null
+++ b/PuzzleTuning/utils/temp-tensors/color_labels.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:25f4d6316bbcab066eaf26be4e418547a468d3f5d45c274535306bd37ca814b6
+size 747
diff --git a/PuzzleTuning/utils/temp-tensors/warwick.pt b/PuzzleTuning/utils/temp-tensors/warwick.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e37fa21a753b5c76591757775ad2e267651af424
--- /dev/null
+++ b/PuzzleTuning/utils/temp-tensors/warwick.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ec6955b6afe4692ee4ead145f50e052e5c4481409beeda42bc4377d6fe8f579
+size 2409195
diff --git a/PuzzleTuning/utils/temp-tensors/warwick_labels.pt b/PuzzleTuning/utils/temp-tensors/warwick_labels.pt
new file mode 100644
index 0000000000000000000000000000000000000000..edca0ee35b3cf03395e6319763430844f128bbb0
--- /dev/null
+++ b/PuzzleTuning/utils/temp-tensors/warwick_labels.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65c8a01f50cf37cccc74514ada3f2e4d696133652404b2bb4313cfd0fe6df063
+size 747
diff --git a/PuzzleTuning/utils/tools.py b/PuzzleTuning/utils/tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..25b5c9f3430679496e1de56a6104d4f715bc345a
--- /dev/null
+++ b/PuzzleTuning/utils/tools.py
@@ -0,0 +1,144 @@
+"""
+Tools Script  ver: Feb 22nd 20:00
+"""
+import os
+import shutil
+import torch
+import numpy as np
+from collections import OrderedDict
+
+
+# Tools
+def del_file(filepath):
+    """
+    clear all items within a folder
+    :param filepath: folder path
+    :return:
+    """
+    del_list = os.listdir(filepath)
+    for f in del_list:
+        file_path = os.path.join(filepath, f)
+        if os.path.isfile(file_path):
+            os.remove(file_path)
+        elif os.path.isdir(file_path):
+            shutil.rmtree(file_path)
+
+
+def to_2tuple(input):
+    if type(input) is tuple:
+        if len(input) >= 2:
+            return (input[0], input[1])
+        elif len(input) == 1:
+            return (input[0], input[0])
+        else:
+            print('cannot handle an empty tuple')
+    elif type(input) is list:
+        if len(input) >= 2:
+            return (input[0], input[1])
+        elif len(input) == 1:
+            return (input[0], input[0])
+        else:
+            print('cannot handle an empty list')
+    elif type(input) is int:
+        return (input, input)
+    else:
+        print('cannot handle ', type(input))
+        raise TypeError('cannot handle ' + str(type(input)))
+
+
+def find_all_files(root, suffix=None):
+    """
+    Return a list of file paths that end with a specific suffix
+    """
+    res = []
+    if type(suffix) is tuple or type(suffix) is list:
+        for root, _, files in os.walk(root):
+            for f in files:
+                if suffix is not None:
+                    status = 0
+                    for i in suffix:
+                        if not f.endswith(i):
+                            pass
+                        else:
+                            status = 1
+                            break
+                    if status == 0:
+                        continue
+                res.append(os.path.join(root, f))
+        return res
+
+    elif type(suffix) is str or suffix is None:
+        for root, _, files in os.walk(root):
+            for f in files:
+                if suffix is not None and not f.endswith(suffix):
+                    continue
+                res.append(os.path.join(root, f))
+        return res
+
+    else:
+        print('type of suffix is not legal :', type(suffix))
+        return -1
+
+
+# Transfer a state_dict by removing misalignment
+def FixStateDict(state_dict, remove_key_head=None):
+    """
+    Obtain a fixed state_dict by removing misaligned keys
+
+    :param state_dict: model state_dict of OrderedDict()
+    :param remove_key_head: the str, or list of strings, to be removed by startswith matching
+    """
+
+    if remove_key_head is None:
+        return state_dict
+
+    elif type(remove_key_head) == str:
+        keys = []
+        for k, v in state_dict.items():
+            if k.startswith(remove_key_head):  # filter out the keys starting with the given head;
+                continue                       # these belong to the layers to be removed
+            keys.append(k)
+
+    elif type(remove_key_head) == list:
+        keys = []
+        for k, v in state_dict.items():
+            jump = False
+            for a_remove_key_head in remove_key_head:
+                if k.startswith(a_remove_key_head):  # filter out the keys starting with any given head
+                    jump = True
+                    break
+            if jump:
+                continue
+            else:
+                keys.append(k)
+    else:
+        print('error in defining remove_key_head!')
+        return -1
+
+    new_state_dict = OrderedDict()
+    for k in keys:
+        new_state_dict[k] = state_dict[k]
+    return new_state_dict
+
+
+def setup_seed(seed):  # set up the random seed
+    import random
+    torch.manual_seed(seed)
+    torch.cuda.manual_seed_all(seed)
+    np.random.seed(seed)
+    random.seed(seed)
+    torch.backends.cudnn.deterministic = True
+
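A hedged sketch of the intended use of FixStateDict: dropping a classification head before loading a backbone (the checkpoint path and the 'head' prefix are just examples):

```python
import torch
from utils.tools import FixStateDict

state_dict = torch.load('some_checkpoint.pth', map_location='cpu')  # hypothetical path
# drop every parameter whose key starts with 'head' (e.g. 'head.weight', 'head.bias')
backbone_state = FixStateDict(state_dict, remove_key_head='head')
# model.load_state_dict(backbone_state, strict=False)
```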
diff --git a/PuzzleTuning/utils/transfermodel.py b/PuzzleTuning/utils/transfermodel.py
new file mode 100644
index 0000000000000000000000000000000000000000..f353f6be52e0027bce76b9f547d4507a30ebba83
--- /dev/null
+++ b/PuzzleTuning/utils/transfermodel.py
@@ -0,0 +1,300 @@
+"""
+Transfer PuzzleTuning Pre-Training checkpoints Script  ver: Oct 23rd 17:00
+
+builds a model based on the weights of a checkpoint file,
+e.g. create a ViT-base based on a PuzzleTuning SAE checkpoint
+
+"""
+import argparse
+
+import sys
+sys.path.append('..')
+import os
+import torch
+import torch.nn as nn
+
+from Backbone import getmodel, GetPromptModel
+from SSL_structures import SAE
+
+
+# Transfer pretrained checkpoints to a normal model state_dict
+def transfer_model_encoder(check_point_path, save_model_path, model_idx='ViT', prompt_mode=None,
+                           Prompt_Token_num=20, edge_size=384, given_name=None):
+    if not os.path.exists(save_model_path):
+        os.makedirs(save_model_path)
+
+    if given_name is not None:
+        given_path = os.path.join(save_model_path, given_name)
+    else:
+        given_path = None
+
+    if prompt_mode == "Deep" or prompt_mode == "Shallow":
+        model = GetPromptModel.build_promptmodel(edge_size=edge_size, model_idx=model_idx, patch_size=16,
+                                                 Prompt_Token_num=Prompt_Token_num, VPT_type=prompt_mode,
+                                                 base_state_dict=None)
+    # elif prompt_mode == "Other" or prompt_mode == None:
+    else:
+        model = getmodel.get_model(model_idx=model_idx, pretrained_backbone=False, edge_size=edge_size)
+    # the checkpoint was saved as:
+    # state = {'model': model.state_dict(), 'optimizer': optimizer.state_dict(), 'epoch': epoch}
+    # TempBest_state = {'model': best_model_wts, 'epoch': best_epoch_idx}
+    state = torch.load(check_point_path)
+
+    transfer_name = os.path.splitext(os.path.split(check_point_path)[1])[0] + '_of_'
+
+    try:
+        model_state = state['model']
+        try:
+            print("checkpoint epoch", state['epoch'])
+            if prompt_mode is not None:
+                save_model_path = os.path.join(save_model_path, transfer_name +
+                                               model_idx + '_E_' + str(state['epoch']) + '_promptstate' + '.pth')
+            else:
+                save_model_path = os.path.join(save_model_path, transfer_name +
+                                               model_idx + '_E_' + str(state['epoch']) + '_transfer' + '.pth')
+
+        except:
+            print("no 'epoch' in state")
+            if prompt_mode is not None:
+                save_model_path = os.path.join(save_model_path, transfer_name + model_idx + '_promptstate' + '.pth')
+            else:
+                save_model_path = os.path.join(save_model_path, transfer_name + model_idx + '_transfer' + '.pth')
+    except:
+        print("not a checkpoint state (no 'model' in state)")
+        model_state = state
if prompt_mode is not None: + save_model_path = os.path.join(save_model_path, transfer_name + model_idx + '_promptstate' + '.pth') + else: + save_model_path = os.path.join(save_model_path, transfer_name + model_idx + '_transfer' + '.pth') + + try: + model.load_state_dict(model_state) + print("model loaded") + print("model :", model_idx) + gpu_use = 0 + except: + try: + model = nn.DataParallel(model) + model.load_state_dict(model_state, False) + print("DataParallel model loaded") + print("model :", model_idx) + gpu_use = -1 + except: + print("model loading erro!!") + gpu_use = -2 + + if given_path is not None: + save_model_path = given_path + + if gpu_use == -1: + # print(model) + if prompt_mode is not None: + prompt_state_dict = model.module.obtain_prompt() + # fixme maybe bug at DP module.obtain_prompt, just model.obtain_prompt is enough + print('prompt obtained') + torch.save(prompt_state_dict, save_model_path) + else: + torch.save(model.module.state_dict(), save_model_path) + print('model trained by multi-GPUs has its single GPU copy saved at ', save_model_path) + + elif gpu_use == 0: + if prompt_mode is not None: + prompt_state_dict = model.obtain_prompt() + print('prompt obtained') + torch.save(prompt_state_dict, save_model_path) + else: + torch.save(model.state_dict(), save_model_path) + print('model trained by a single GPU has been saved at ', save_model_path) + else: + print('erro') + + +def transfer_model_decoder(check_point_path, save_model_path, + model_idx='sae_vit_base_patch16_decoder', dec_idx='swin_unet', + prompt_mode=None, Prompt_Token_num=20, edge_size=384): + + if not os.path.exists(save_model_path): + os.makedirs(save_model_path) + + state = torch.load(check_point_path) + + transfer_name = os.path.splitext(os.path.split(check_point_path)[1])[0] + '_of_' + + model = SAE.__dict__[model_idx](img_size=edge_size, prompt_mode=prompt_mode, Prompt_Token_num=Prompt_Token_num, + basic_state_dict=None, dec_idx=dec_idx) + + try: + model_state = state['model'] + try: + print("checkpoint epoch", state['epoch']) + save_model_path = os.path.join(save_model_path, transfer_name + 'Decoder_' + dec_idx + '_E_' + + str(state['epoch']) + '.pth') + + + except: + print("no 'epoch' in state") + save_model_path = os.path.join(save_model_path, transfer_name + 'Decoder_' + dec_idx + '.pth') + except: + print("not a checkpoint state (no 'model' in state)") + model_state = state + save_model_path = os.path.join(save_model_path, transfer_name + 'Decoder_' + dec_idx + '.pth') + + try: + model.load_state_dict(model_state) + print("model loaded") + print("model :", model_idx) + gpu_use = 0 + except: + try: + model = nn.DataParallel(model) + model.load_state_dict(model_state, False) + print("DataParallel model loaded") + print("model :", model_idx) + gpu_use = -1 + except: + print("model loading erro!!") + gpu_use = -2 + + else: + model = model.decoder + + if gpu_use == -1: + torch.save(model.module.decoder.state_dict(), save_model_path) + print('model trained by multi-GPUs has its single GPU copy saved at ', save_model_path) + + elif gpu_use == 0: + torch.save(model.state_dict(), save_model_path) + print('model trained by a single GPU has been saved at ', save_model_path) + else: + print('erro') + + +def get_args_parser(): + parser = argparse.ArgumentParser('Take pre-trained model from PuzzleTuning', add_help=False) + + # Model Name or index + parser.add_argument('--given_name', default=None, type=str, help='name of the weight-state-dict') + parser.add_argument('--model_idx', default='ViT', type=str, 
help='taking the weight to the specified model')
+    parser.add_argument('--edge_size', default=224, type=int, help='images input size for model')
+
+    # PromptTuning
+    parser.add_argument('--PromptTuning', default=None, type=str,
+                        help='Deep/Shallow to use Prompt Tuning model instead of Finetuning model, by default None')
+    # Prompt_Token_num
+    parser.add_argument('--Prompt_Token_num', default=20, type=int, help='Prompt_Token_num')
+
+    # PATH settings
+    parser.add_argument('--checkpoint_path', default=None, type=str, help='check_point_path')
+    parser.add_argument('--save_model_path', default=None, type=str, help='output weight path for the pre-trained model')
+
+    return parser
+
+
+def main(args):
+    # fixme: currently needs a CUDA device, as the checkpoint is saved as a CUDA model!
+
+
+    # PuzzleTuning Template
+    """
+    # Prompt
+    # transfer_model_encoder(checkpoint_path, save_model_path, model_idx='ViT', edge_size=224, prompt_mode='Deep', Prompt_Token_num=20, given_name=given_name)
+
+    # non-prompt model
+    # transfer_model_encoder(checkpoint_path, save_model_path, model_idx='ViT', edge_size=224, given_name=given_name)
+
+    # decoder
+    # transfer_model_decoder(checkpoint_path, save_model_path, model_idx='sae_vit_base_patch16_decoder', dec_idx='swin_unet', edge_size=224, prompt_mode='Deep')
+
+
+    # PuzzleTuning Experiments transfer records:
+    # 1 cyclic puzzle with automatically decaying ratio and automatically looping patch size, transferred from timm; PromptTuning: VPT-Deep; seg_decoder: None (the core method)
+    # ViT_b16_224_timm_PuzzleTuning_SAE_CPIAm_Prompt_Deep_tokennum_20_E_199_promptstate.pth
+    checkpoint_path = '/root/autodl-tmp/runs/PuzzleTuning_SAE_vit_base_patch16_Prompt_Deep_tokennum_20_tr_timm_CPIAm/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20_checkpoint-199.pth'
+    save_model_path = '/root/autodl-tmp/output_models'
+    given_name = r'ViT_b16_224_timm_PuzzleTuning_SAE_CPIAm_Prompt_Deep_tokennum_20_E_50_promptstate.pth'
+    transfer_model_encoder(checkpoint_path, save_model_path, model_idx='ViT', edge_size=224, prompt_mode='Deep',
+                           Prompt_Token_num=20, given_name=given_name)
+
+    # PuzzleTuning ablation studies: SAE + different curricula + different VPT/ViT
+    # 2 cyclic puzzle with automatically decaying ratio and automatically looping patch size, transferred from timm; PromptTuning: None; seg_decoder: None
+    # ViT_b16_224_timm_PuzzleTuning_SAE_CPIAm_E_199.pth
+    checkpoint_path = '/root/autodl-tmp/runs/PuzzleTuning_SAE_vit_base_patch16_tr_timm_CPIAm/PuzzleTuning_sae_vit_base_patch16_checkpoint-199.pth'
+    save_model_path = '/root/autodl-tmp/output_models'
+    given_name = r'ViT_b16_224_timm_PuzzleTuning_SAE_CPIAm_E_199.pth'
+    transfer_model_encoder(checkpoint_path, save_model_path, model_idx='ViT', edge_size=224, given_name=given_name)
+
+    # 3 fixed puzzle ratio, fixed patch size, transferred from timm; PromptTuning: VPT-Deep; seg_decoder: None (server pt1)
+    # ViT_b16_224_timm_PuzzleTuning_fixp16fixr25_SAE_CPIAm_Prompt_Deep_tokennum_20_E_199_promptstate.pth
+    checkpoint_path = '/root/autodl-tmp/runs/PuzzleTuning_SAE_fixp16fixr25_vit_base_Prompt_Deep_tokennum_20_tr_timm_CPIAm/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20_checkpoint-199.pth'
+    save_model_path = '/root/autodl-tmp/output_models'
+    given_name = r'ViT_b16_224_timm_PuzzleTuning_SAE_fixp16fixr25_CPIAm_Prompt_Deep_tokennum_20_E_199_promptstate.pth'
+    transfer_model_encoder(checkpoint_path, save_model_path, model_idx='ViT', edge_size=224, prompt_mode='Deep',
+                           Prompt_Token_num=20, given_name=given_name)
+
+    # 4 fixed puzzle ratio, fixed patch size, transferred from timm; PromptTuning: None; seg_decoder: None (server pt2)
+    # ViT_b16_224_timm_PuzzleTuning_fixp16fixr25_SAE_CPIAm_E_199.pth
+    checkpoint_path = '/root/autodl-tmp/runs/PuzzleTuning_SAE_fixp16fixr25_vit_base_patch16_tr_timm_CPIAm/PuzzleTuning_sae_vit_base_patch16_checkpoint-199.pth'
+    save_model_path = '/root/autodl-tmp/output_models'
+    given_name = r'ViT_b16_224_timm_PuzzleTuning_SAE_fixp16fixr25_CPIAm_E_199.pth'
+    transfer_model_encoder(checkpoint_path, save_model_path, model_idx='ViT', edge_size=224, given_name=given_name)
+
+    # 5 varying puzzle ratio, fixed patch size, transferred from timm; PromptTuning: VPT-Deep; seg_decoder: None; strategy: ratio-decay (server pt3)
+    # ViT_b16_224_timm_PuzzleTuning_fixp16ratiodecay_SAE_CPIAm_Prompt_Deep_tokennum_20_E_199_promptstate.pth
+    checkpoint_path = '/root/autodl-tmp/runs/PuzzleTuning_SAE_fixp16ratiodecay_vit_base_Prompt_Deep_tokennum_20_tr_timm_CPIAm/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20_checkpoint-199.pth'
+    save_model_path = '/root/autodl-tmp/output_models'
+    given_name = r'ViT_b16_224_timm_PuzzleTuning_SAE_fixp16ratiodecay_CPIAm_Prompt_Deep_tokennum_20_E_199_promptstate.pth'
+    transfer_model_encoder(checkpoint_path, save_model_path, model_idx='ViT', edge_size=224, prompt_mode='Deep',
+                           Prompt_Token_num=20, given_name=given_name)
+
+    # 6 varying puzzle ratio, fixed patch size, transferred from timm; PromptTuning: None; seg_decoder: None (server pt4)
+    # ViT_b16_224_timm_PuzzleTuning_fixp16ratiodecay_SAE_CPIAm_E_199.pth
+    checkpoint_path = '/root/autodl-tmp/runs/PuzzleTuning_SAE_fixp16ratiodecay_vit_base_patch16_tr_timm_CPIAm/PuzzleTuning_sae_vit_base_patch16_checkpoint-199.pth'
+    save_model_path = '/root/autodl-tmp/output_models'
+    given_name = r'ViT_b16_224_timm_PuzzleTuning_SAE_fixp16ratiodecay_CPIAm_E_199.pth'
+    transfer_model_encoder(checkpoint_path, save_model_path, model_idx='ViT', edge_size=224, given_name=given_name)
+
+    # PuzzleTuning ablation studies: no puzzle upstream, i.e. VPT + MAE
+    # 7 MAE + VPT, transferred from timm; PromptTuning: VPT-Deep; seg_decoder: None (A40*4 server pt5)
+    # ViT_b16_224_timm_PuzzleTuning_MAE_CPIAm_Prompt_Deep_tokennum_20_E_199_promptstate.pth
+    checkpoint_path = '/root/autodl-tmp/runs/PuzzleTuning_MAE_vit_base_Prompt_Deep_tokennum_20_tr_timm_CPIAm/PuzzleTuning_mae_vit_base_patch16_Prompt_Deep_tokennum_20_checkpoint-199.pth'
+    save_model_path = '/root/autodl-tmp/output_models'
+    given_name = r'ViT_b16_224_timm_PuzzleTuning_MAE_CPIAm_Prompt_Deep_tokennum_20_E_199_promptstate.pth'
+    transfer_model_encoder(checkpoint_path, save_model_path, model_idx='ViT', edge_size=224, prompt_mode='Deep',
+                           Prompt_Token_num=20, given_name=given_name)
+
+    # 8 cyclic puzzle with automatically decaying ratio and automatically looping patch size, transferred from MAEImageNet; PromptTuning: VPT-Deep; seg_decoder: None (A100-PCIE*2 server pt6)
+    # ViT_b16_224_MAEImageNet_PuzzleTuning_SAE_CPIAm_Prompt_Deep_tokennum_20_E_199_promptstate.pth
+    checkpoint_path = '/root/autodl-tmp/runs/PuzzleTuning_SAE_vit_base_patch16_Prompt_Deep_tokennum_20_tr_MAEImageNet_CPIAm/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20_checkpoint-199.pth'
+    save_model_path = '/root/autodl-tmp/output_models'
+    given_name = r'ViT_b16_224_MAEImageNet_PuzzleTuning_SAE_CPIAm_Prompt_Deep_tokennum_20_E_199_promptstate.pth'
+    transfer_model_encoder(checkpoint_path, save_model_path, model_idx='ViT', edge_size=224, prompt_mode='Deep',
+                           Prompt_Token_num=20, given_name=given_name)
+
+    # 9 cyclic puzzle with automatically decaying ratio and automatically looping patch size, transferred from Random; PromptTuning: VPT-Deep; seg_decoder: None (A40*4 server pt7)
+    # ViT_b16_224_Random_PuzzleTuning_SAE_CPIAm_Prompt_Deep_tokennum_20_E_199_promptstate.pth
+    checkpoint_path = '/root/autodl-tmp/runs/PuzzleTuning_SAE_vit_base_patch16_Prompt_Deep_tokennum_20_tr_Random_CPIAm/PuzzleTuning_sae_vit_base_patch16_Prompt_Deep_tokennum_20_checkpoint-199.pth'
+    save_model_path = '/root/autodl-tmp/output_models'
+    given_name = r'ViT_b16_224_Random_PuzzleTuning_SAE_CPIAm_Prompt_Deep_tokennum_20_E_199_promptstate.pth'
+    transfer_model_encoder(checkpoint_path, save_model_path, model_idx='ViT', edge_size=224, prompt_mode='Deep',
+                           Prompt_Token_num=20, given_name=given_name)
+
+    # 10 cyclic puzzle with automatically decaying ratio and automatically looping patch size, transferred from Random; PromptTuning: None; seg_decoder: None (4090*6 server pt8)
+    # ViT_b16_224_MAEImageNet_PuzzleTuning_SAE_CPIAm_E_199.pth
+    checkpoint_path = '/root/autodl-tmp/runs/PuzzleTuning_SAE_vit_base_patch16_tr_MAEImageNet_CPIAm/PuzzleTuning_sae_vit_base_patch16_checkpoint-199.pth'
+    save_model_path = '/root/autodl-tmp/output_models'
+    given_name = r'ViT_b16_224_MAEImageNet_PuzzleTuning_SAE_CPIAm_E_199.pth'
+    transfer_model_encoder(checkpoint_path, save_model_path, model_idx='ViT', edge_size=224, given_name=given_name)
+    """
+
+    transfer_model_encoder(args.checkpoint_path, args.save_model_path,
+                           model_idx=args.model_idx, edge_size=args.edge_size,
+                           prompt_mode=args.PromptTuning, Prompt_Token_num=args.Prompt_Token_num,
+                           given_name=args.given_name)
+
+
+if __name__ == '__main__':
+    args = get_args_parser()
+    args = args.parse_args()
+
+    main(args)
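A hedged sketch of calling the transfer function directly, mirroring the template records above (the paths and the output name are placeholders):

```python
from utils.transfermodel import transfer_model_encoder

# take a VPT-Deep PuzzleTuning checkpoint and export its prompt state
transfer_model_encoder(
    check_point_path='/path/to/PuzzleTuning_checkpoint-199.pth',
    save_model_path='/path/to/output_models',
    model_idx='ViT', edge_size=224,
    prompt_mode='Deep', Prompt_Token_num=20,
    given_name='ViT_b16_224_promptstate.pth')
```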
diff --git a/PuzzleTuning/utils/visual_usage.py b/PuzzleTuning/utils/visual_usage.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf3d27c2e208212caaece2df1f3881c66126e4f4
--- /dev/null
+++ b/PuzzleTuning/utils/visual_usage.py
@@ -0,0 +1,509 @@
+"""
+Attention Visualization Script  ver: Oct 23rd 18:00
+uses rgb format input
+"""
+
+import torch
+import torch.nn as nn
+import numpy as np
+import matplotlib.pyplot as plt
+import os
+from PIL import Image
+from torchvision.transforms import ToPILImage
+
+
+def softmax(x):
+    """Compute the softmax in a numerically stable way."""
+    sof = nn.Softmax()
+    return sof(x)
+
+
+def imshow(inp, title=None):  # Imshow for Tensor
+    """Imshow for Tensor."""
+    inp = inp.numpy().transpose((1, 2, 0))
+    '''
+    # if required: undo the normalization
+    # because transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
+    mean = np.array([0.485, 0.456, 0.406])
+    std = np.array([0.229, 0.224, 0.225])
+    inp = std * inp + mean
+    inp = np.clip(inp, 0, 1)
+    '''
+    plt.imshow(inp)
+    if title is not None:
+        plt.title(title)
+    plt.pause(0.001)  # pause a bit so that plots are updated
+
+
+def Draw_tri_fig(Ori_img, Puz_img, Rec_img, picpath):
+    plt.figure()
+
+    ax = plt.subplot(1, 3, 1)
+    ax.axis('off')
+    ax.set_title('Original')
+    plt.imshow(Ori_img)
+
+    ax = plt.subplot(1, 3, 2)
+    ax.axis('off')
+    ax.set_title('Puzzle')
+    plt.imshow(Puz_img)
+
+    ax = plt.subplot(1, 3, 3)
+    ax.axis('off')
+    ax.set_title('Restored')
+    plt.imshow(Rec_img)
+
+    plt.savefig(picpath, dpi=400)
+    plt.show()
+
+    plt.cla()
+    plt.close("all")
+
+
+# Grad-CAM part: visualize the attention area of CNN + Transformer models
+def cls_token_s12_transform(tensor, height=12, width=12):  # based on pytorch_grad_cam
+    result = tensor[:, 1:, :].reshape(tensor.size(0), height, width, tensor.size(2))
+
+    # Bring the channels to the first dimension,
+    # like in CNNs.
+def cls_token_s14_transform(tensor, height=14, width=14):  # based on pytorch_grad_cam
+    result = tensor[:, 1:, :].reshape(tensor.size(0), height, width, tensor.size(2))
+
+    # Bring the channels to the first dimension,
+    # like in CNNs.
+    result = result.transpose(2, 3).transpose(1, 2)
+    return result
+
+
+def cls_token_s16_transform(tensor, height=16, width=16):  # based on pytorch_grad_cam
+    result = tensor[:, 1:, :].reshape(tensor.size(0), height, width, tensor.size(2))
+
+    # Bring the channels to the first dimension,
+    # like in CNNs.
+    result = result.transpose(2, 3).transpose(1, 2)
+    return result
+
+
+def cls_token_s24_transform(tensor, height=24, width=24):  # based on pytorch_grad_cam
+    result = tensor[:, 1:, :].reshape(tensor.size(0), height, width, tensor.size(2))
+
+    # Bring the channels to the first dimension,
+    # like in CNNs.
+    result = result.transpose(2, 3).transpose(1, 2)
+    return result
+
+
+def no_cls_token_s12_transform(tensor, height=12, width=12):  # based on pytorch_grad_cam
+    result = tensor.reshape(tensor.size(0), height, width, tensor.size(2))
+
+    # Bring the channels to the first dimension,
+    # like in CNNs.
+    result = result.transpose(2, 3).transpose(1, 2)
+    return result
+
+
+def swinT_transform_224(tensor, height=7, width=7):  # 224: 7 x 7 token grid
+    result = tensor.reshape(tensor.size(0), height, width, tensor.size(2))
+
+    # Bring the channels to the first dimension,
+    # like in CNNs.
+    result = result.transpose(2, 3).transpose(1, 2)
+    return result
+
+
+def swinT_transform_384(tensor, height=12, width=12):  # 384: 12 x 12 token grid
+    result = tensor.reshape(tensor.size(0), height, width, tensor.size(2))
+
+    # Bring the channels to the first dimension,
+    # like in CNNs.
+    result = result.transpose(2, 3).transpose(1, 2)
+    return result
+
+
+def choose_cam_by_model(model, model_idx, edge_size, use_cuda=True, model_type='CLS'):
+    """
+    :param model: model object
+    :param model_idx: model idx used to select the preset target layer and reshape transform
+    :param edge_size: input image size used to select the preset reshape transform
+
+    :param use_cuda: use cuda to speed up imaging
+    :param model_type: default 'CLS' for a classification model, 'MIL' targets the model's backbone
+    """
+    from pytorch_grad_cam import GradCAM
+
+    # TODO: reshape_transform for Conformer at 224
+    # check class: target_category = None
+    # If None, returns the map for the highest scoring category.
+    # Otherwise, targets the requested category.
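+    # Dispatch below: transformer backbones need a reshape_transform to fold the
+    # token sequence back into a 2D feature map before CAM weighting, while pure
+    # CNNs (vgg / ResNet / mobilenet / efficientnet) pass reshape_transform=None.
+    # For ViT-like models the target layer is the last block's first norm, per
+    # https://github.com/jacobgil/pytorch-grad-cam/blob/master/tutorials/vision_transformers.md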
+
+    if model_idx[0:3] == 'ViT' or model_idx[0:4] == 'deit':
+        # We should choose a layer before the final attention block,
+        # check: https://github.com/jacobgil/pytorch-grad-cam/blob/master/tutorials/vision_transformers.md
+        if model_type == 'CLS':
+            target_layers = [model.blocks[-1].norm1]
+        else:  # MIL-SI
+            target_layers = [model.backbone.blocks[-1].norm1]
+
+        if model_idx[0:5] == 'ViT_h':
+            if edge_size == 224:
+                grad_cam = GradCAM(model, target_layers=target_layers, use_cuda=use_cuda,
+                                   reshape_transform=cls_token_s16_transform)
+            else:
+                print('ERROR in ViT_huge edge size')
+                return -1
+        else:
+            if edge_size == 384:
+                grad_cam = GradCAM(model, target_layers=target_layers, use_cuda=use_cuda,
+                                   reshape_transform=cls_token_s24_transform)
+            elif edge_size == 224:
+                grad_cam = GradCAM(model, target_layers=target_layers, use_cuda=use_cuda,
+                                   reshape_transform=cls_token_s14_transform)
+            else:
+                print('ERROR in ViT/DeiT edge size')
+                return -1
+
+    elif model_idx[0:3] == 'vgg':
+        if model_type == 'CLS':
+            target_layers = [model.features[-1]]
+        else:
+            target_layers = [model.backbone.features[-1]]
+        grad_cam = GradCAM(model, target_layers=target_layers, use_cuda=use_cuda, reshape_transform=None)
+
+    elif model_idx[0:6] == 'swin_b':
+        if model_type == 'CLS':
+            target_layers = [model.layers[-1].blocks[-1].norm1]
+        else:
+            target_layers = [model.backbone.layers[-1].blocks[-1].norm1]
+        if edge_size == 384:
+            grad_cam = GradCAM(model, target_layers=target_layers, use_cuda=use_cuda,
+                               reshape_transform=swinT_transform_384)
+        elif edge_size == 224:
+            grad_cam = GradCAM(model, target_layers=target_layers, use_cuda=use_cuda,
+                               reshape_transform=swinT_transform_224)
+        else:
+            print('ERROR in Swin Transformer edge size')
+            return -1
+
+    elif model_idx[0:6] == 'ResNet':
+        if model_type == 'CLS':
+            target_layers = [model.layer4[-1]]
+        else:
+            target_layers = [model.backbone.layer4[-1]]
+
+        grad_cam = GradCAM(model, target_layers=target_layers, use_cuda=use_cuda, reshape_transform=None)  # CNN: None
+
+    elif model_idx[0:7] == 'Hybrid1' and edge_size == 384:
+        target_layers = [model.blocks[-1].norm1]
+        grad_cam = GradCAM(model, target_layers=target_layers, use_cuda=use_cuda,
+                           reshape_transform=cls_token_s12_transform)
+
+    elif model_idx[0:7] == 'Hybrid2' and edge_size == 384:
+        target_layers = [model.dec4.norm1]
+
+        if 'CLS' in model_idx.split('_') and 'No' in model_idx.split('_'):
+            grad_cam = GradCAM(model, target_layers=target_layers, use_cuda=use_cuda,
+                               reshape_transform=no_cls_token_s12_transform)
+
+        else:
+            grad_cam = GradCAM(model, target_layers=target_layers, use_cuda=use_cuda,
+                               reshape_transform=cls_token_s12_transform)
+
+    elif model_idx[0:7] == 'Hybrid3' and edge_size == 384:
+        target_layers = [model.dec3.norm1]
+        grad_cam = GradCAM(model, target_layers=target_layers, use_cuda=use_cuda,
+                           reshape_transform=cls_token_s24_transform)
+
+    elif model_idx[0:9] == 'mobilenet':
+        if model_type == 'CLS':
+            target_layers = [model.blocks[-1]]
+        else:
+            target_layers = [model.backbone.blocks[-1]]
+        grad_cam = GradCAM(model, target_layers=target_layers, use_cuda=use_cuda, reshape_transform=None)  # CNN: None
+
+    elif model_idx[0:10] == 'ResN50_ViT':  # edge size is dispatched below
+        if model_type == 'CLS':
+            target_layers = [model.blocks[-1].norm1]
+        else:
+            target_layers = [model.backbone.blocks[-1].norm1]
+        if edge_size == 384:
+            grad_cam = GradCAM(model, target_layers=target_layers, use_cuda=use_cuda,
+                               reshape_transform=cls_token_s24_transform)
+        elif edge_size == 224:
+            grad_cam = GradCAM(model, target_layers=target_layers, use_cuda=use_cuda,
+                               reshape_transform=cls_token_s14_transform)
+        else:
+            print('ERROR in ResN50_ViT edge size')
+            return -1
+
+    elif model_idx[0:12] == 'efficientnet':
+        target_layers = [model.conv_head]
+        grad_cam = GradCAM(model, target_layers=target_layers, use_cuda=use_cuda, reshape_transform=None)  # CNN: None
+
+    else:
+        print('ERROR in model_idx')
+        return -1
+
+    return grad_cam
+
+
+def check_SAA(inputs, labels, model, model_idx, edge_size, class_names, model_type='CLS', num_images=-1,
+              pic_name='test',
+              draw_path='../imaging_results', check_all=True, unknown_GT=False, writer=None):
+    """
+    check num_images images and visualize the model's attention area
+    output a figure with num_images rows and one column per checking type
+
+    :param inputs: inputs of data
+    :param labels: labels or the K+1 soft label of data
+
+    :param model: model object
+    :param model_idx: model idx used to select the preset target layer and reshape transform
+    :param edge_size: input image size used to select the preset reshape transform
+
+    :param class_names: the names of the classes, used for captions
+    :param model_type: default 'CLS' for a classification model, 'MIL' targets the model's backbone
+
+    :param num_images: how many images to check; must be no larger than the batch size
+    :param pic_name: name of the output pic
+    :param draw_path: path folder for the output pic
+    :param check_all: True (default) to draw the CAM for every class,
+                      False to draw it for the predicted class only
+
+    :param unknown_GT: CAM on unknown GT
+
+    :param writer: attach the pic to the tensorboard backend
+
+    :return: None
+    """
+    from pytorch_grad_cam.utils import show_cam_on_image
+
+    # choose the checking type: False for the predicted class only; True for all classes
+    if check_all:
+        checking_type = ['ori', ]
+        checking_type.extend([cls for cls in range(len(class_names))])
+    else:
+        checking_type = ['ori', 'tar']
+
+    # test model
+    was_training = model.training
+    model.eval()
+
+    outputs = model(inputs)
+    _, preds = torch.max(outputs, 1)
+
+    grad_cam = choose_cam_by_model(model, model_idx, edge_size, model_type=model_type)  # choose model
+
+    if num_images == -1:  # auto detect a batch
+        num_images = int(inputs.shape[0])
+
+    images_so_far = 0
+    plt.figure()
+
+    for j in range(num_images):
+
+        for type in checking_type:
+            images_so_far += 1
+            if type == 'ori':
+                ax = plt.subplot(num_images, len(checking_type), images_so_far)
+                ax.axis('off')
+
+                if unknown_GT and not len(labels) == 1:  # Ground Truth of the K+1 soft label
+                    soft_label = labels.cpu().numpy()[j]  # K+1 soft label
+                    title = 'A' + str(round(soft_label[0], 0))
+                    for i in range(1, len(soft_label)):
+                        title += class_names[i - 1][0]  # use the first character only
+                        title += str(round(soft_label[i], 0))  # use int (float 0)
+                        title += ' '
+                    ax.set_title(title)
+
+                else:
+                    ax.set_title('Ground Truth:{}'.format(class_names[int(labels[j])]))
+
+                imshow(inputs.cpu().data[j])
+                plt.pause(0.001)  # pause a bit so that plots are updated
+
+            else:
+                ax = plt.subplot(num_images, len(checking_type), images_so_far)
+                ax.axis('off')
+                if type == 'tar':  # target categories
+                    ax.set_title('Predict: {}'.format(class_names[preds[j]]))
+                    # focus on the predicted class to create grayscale_cam
+                    # grayscale_cam is generated on the whole batch
+                    grayscale_cam = grad_cam(inputs, target_category=None, eigen_smooth=False, aug_smooth=False)
+                else:
+                    # pseudo confidence by softmax
+                    ax.set_title('{:.1%} {}'.format(softmax(outputs[j])[int(type)], class_names[int(type)]))
+                    # focus on the specific target class to create grayscale_cam
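+                    # (target_category=int(type) requests the CAM of that specific
+                    # class; target_category=None, as in the 'tar' branch above,
+                    # defaults to the top-scoring class of each image)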
+                    # grayscale_cam is generated on the whole batch
+                    grayscale_cam = grad_cam(inputs, target_category=int(type), eigen_smooth=False, aug_smooth=False)
+
+                # get a cv2-encoded image from the dataloader by inputs[j].cpu().numpy().transpose((1, 2, 0))
+                cam_img = show_cam_on_image(inputs[j].cpu().numpy().transpose((1, 2, 0)), grayscale_cam[j],
+                                            use_rgb=True)  # use rgb format input (previous FIXME, already fixed)
+
+                plt.imshow(cam_img)
+                plt.pause(0.001)  # pause a bit so that plots are updated
+
+            if images_so_far == num_images * len(checking_type):  # stop once enough pics have been drawn
+                picpath = os.path.join(draw_path, pic_name + '.jpg')
+                if not os.path.exists(draw_path):
+                    os.makedirs(draw_path)
+
+                plt.savefig(picpath, dpi=1000)
+                plt.show()
+
+                model.train(mode=was_training)
+                if writer is not None:  # attach the pic to the tensorboard backend if available
+                    image_PIL = Image.open(picpath)
+                    img = np.array(image_PIL)
+                    writer.add_image(pic_name, img, 1, dataformats='HWC')
+
+                plt.cla()
+                plt.close("all")
+                return
+
+    model.train(mode=was_training)
+
+
+def visualize_check(inputs, labels, model, class_names, num_images=-1, pic_name='test',
+                    draw_path='/home/ZTY/imaging_results', writer=None):  # visual check
+    """
+    check num_images images and visualize them
+    output a figure whose grid (columns x rows) is chosen automatically below
+
+    :param inputs: inputs of data
+    :param labels: labels of data
+
+    :param model: model object
+    :param class_names: the names of the classes, used for captions
+    :param num_images: how many images to check; must be no larger than the batch size
+    :param pic_name: name of the output pic
+    :param draw_path: path folder for the output pic
+    :param writer: attach the pic to the tensorboard backend
+
+    :return: None
+
+    """
+    was_training = model.training
+    model.eval()
+
+    images_so_far = 0
+    plt.figure()
+
+    with torch.no_grad():
+
+        outputs = model(inputs)
+        _, preds = torch.max(outputs, 1)
+
+        if num_images == -1:  # auto detect a batch
+            num_images = int(inputs.shape[0])
+
+        # pick the widest column count (5, 4, 3, 2) that divides num_images, e.g. 12 -> 4 x 3
+        if num_images % 5 == 0:
+            line_imgs_num = 5
+        elif num_images % 4 == 0:
+            line_imgs_num = 4
+        elif num_images % 3 == 0:
+            line_imgs_num = 3
+        elif num_images % 2 == 0:
+            line_imgs_num = 2
+        else:
+            line_imgs_num = int(num_images)
+
+        rows_imgs_num = int(num_images // line_imgs_num)
+        num_images = line_imgs_num * rows_imgs_num
+
+        for j in range(num_images):  # each batch input idx: j
+
+            images_so_far += 1
+
+            ax = plt.subplot(rows_imgs_num, line_imgs_num, images_so_far)
+
+            ax.axis('off')
+            ax.set_title('Pred: {} True: {}'.format(class_names[preds[j]], class_names[int(labels[j])]))
+            imshow(inputs.cpu().data[j])
+
+            if images_so_far == num_images:
+                picpath = os.path.join(draw_path, pic_name + '.jpg')
+                if not os.path.exists(draw_path):
+                    os.makedirs(draw_path)
+
+                '''
+                myfig = plt.gcf()  # get current image
+                myfig.savefig(picpath, dpi=1000)
+                '''
+                plt.savefig(picpath, dpi=1000)
+                plt.show()
+
+                model.train(mode=was_training)
+
+                if writer is not None:  # attach the pic to the tensorboard backend if available
+                    image_PIL = Image.open(picpath)
+                    img = np.array(image_PIL)
+                    writer.add_image(pic_name, img, 1, dataformats='HWC')
+
+                plt.cla()
+                plt.close("all")
+                return
+
+    model.train(mode=was_training)
+
+
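+# patchify / unpatchify below are inverse reshapes between images and MAE-style
+# patch tokens. Illustrative shapes for a 224 x 224 RGB image with patch_size=16:
+#   patchify:   [B, 3, 224, 224] -> [B, 196, 768]   (14 * 14 patches, 16 * 16 * 3 = 768)
+#   unpatchify: [B, 196, 768]    -> [B, 3, 224, 224]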
+def unpatchify(pred, patch_size=16):
+    """
+    Decode embedded patch tokens back to an image
+
+    input:
+    pred: (B, num_patches, patch_size**2 * 3) AKA [B, num_patches, flatten_dim]
+    patch_size: edge length of a square patch
+
+    output:
+    imgs: (B, 3, H, W)
+    """
+
+    # square root of num_patches (the input must not include the CLS token)
+    h = w = int(pred.shape[1] ** .5)
+    # assert num_patches is a perfect square, i.e. no CLS token is present
+    assert h * w == pred.shape[1]
+
+    # ReArrange dimensions [B, num_patches, flatten_dim] -> [B, h_p, w_p, patch_size, patch_size, C]
+    pred = pred.reshape(shape=(pred.shape[0], h, w, patch_size, patch_size, 3))
+    # ReArrange dimensions [B, h_p, w_p, patch_size, patch_size, C] -> [B, C, h_p, patch_size, w_p, patch_size]
+    pred = torch.einsum('nhwpqc->nchpwq', pred)
+    # use reshape to compose patches [B, C, h_p, patch_size, w_p, patch_size] -> [B, C, H, W]
+    imgs = pred.reshape(shape=(pred.shape[0], 3, h * patch_size, w * patch_size))
+    return imgs
+
+
+def patchify(imgs, patch_size=16):
+    """
+    Break an image into patch tokens
+
+    input:
+    imgs: (B, 3, H, W)
+
+    output:
+    x: (B, num_patches, patch_size**2 * 3) AKA [B, num_patches, flatten_dim]
+    """
+    # assert H == W and the image edge is divisible by the patch size
+    assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % patch_size == 0
+    # patch count per row or column
+    h = w = imgs.shape[2] // patch_size
+
+    # use reshape to split patches [B, C, H, W] -> [B, C, h_p, patch_size, w_p, patch_size]
+    imgs = imgs.reshape(shape=(imgs.shape[0], 3, h, patch_size, w, patch_size))
+
+    # ReArrange dimensions [B, C, h_p, patch_size, w_p, patch_size] -> [B, h_p, w_p, patch_size, patch_size, C]
+    imgs = torch.einsum('nchpwq->nhwpqc', imgs)
+    # ReArrange dimensions [B, h_p, w_p, patch_size, patch_size, C] -> [B, num_patches, flatten_dim]
+    imgs = imgs.reshape(shape=(imgs.shape[0], h * w, patch_size ** 2 * 3))
+    return imgs
+
+
+def anti_tensor_norm(batch_tensor):
+    pass  # TODO: still to be worked out
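+
+
+if __name__ == '__main__':
+    # A minimal round-trip sanity check (an illustrative sketch; variable names
+    # here are hypothetical and this is not part of the training pipeline):
+    # patchify followed by unpatchify should reproduce the input image exactly,
+    # since both are pure reshape/permute operations.
+    demo_batch = torch.rand(2, 3, 224, 224)  # dummy [B, C, H, W] batch
+    tokens = patchify(demo_batch, patch_size=16)  # -> [2, 196, 768]
+    restored = unpatchify(tokens, patch_size=16)  # -> [2, 3, 224, 224]
+    print(tokens.shape, restored.shape)
+    assert torch.equal(demo_batch, restored), 'patchify/unpatchify round-trip failed'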