repo_name: string (length 8–130)
hexsha: sequence
file_path: sequence
code: sequence
apis: sequence
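Each record below follows this schema: one repository name paired with parallel sequences of commit SHAs, file paths, file contents, and the API calls extracted from each file. A minimal sketch of consuming such records, assuming the rows are stored as JSON Lines; the filename "data.jsonl" and the loading code are illustrative assumptions, not part of the dataset:

import json

# Each row pairs one repository with parallel lists: hexsha[i],
# file_path[i], code[i], and apis[i] all describe the same file.
# "data.jsonl" is a hypothetical filename used only for illustration.
with open("data.jsonl") as f:
    for line in f:
        row = json.loads(line)
        repo = row["repo_name"]
        for sha, path, code, apis in zip(
            row["hexsha"], row["file_path"], row["code"], row["apis"]
        ):
            # apis is the list of fully qualified calls found in this file
            print(f"{repo} @ {sha[:8]} :: {path} ({len(code)} chars, {len(apis)} APIs)")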
NunoEdgarGFlowHub/datasets
[ "c3351cdd59eedf8193d670334672ff75020f82b6" ]
[ "tensorflow_datasets/core/features/image_feature_test.py" ]
[ "# coding=utf-8\n# Copyright 2018 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tensorflow_datasets.core.features.image_feature.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow_datasets.core import features as features_lib\nfrom tensorflow_datasets.core import test_utils\n\n\nclass ImageFeatureTest(test_utils.FeatureExpectationsTestCase):\n\n @property\n def expectations(self):\n randint = np.random.randint\n\n img = randint(256, size=(128, 100, 3), dtype=np.uint8)\n img_other_shape = randint(256, size=(64, 200, 3), dtype=np.uint8)\n img_file_path = os.path.join(os.path.dirname(__file__),\n '../../testing/test_data/6pixels.png')\n img_file_expected_content = [ # see tests_data/README.md\n [[0, 255, 0], [255, 0, 0], [255, 0, 255]],\n [[0, 0, 255], [255, 255, 0], [126, 127, 128]],\n ]\n\n img_shaped = randint(256, size=(32, 64, 3), dtype=np.uint8)\n\n return [\n test_utils.FeatureExpectation(\n name='image',\n feature=features_lib.Image(),\n shape=(None, None, 3),\n dtype=tf.uint8,\n tests=[\n # Numpy array\n test_utils.FeatureExpectationItem(\n value=img,\n expected=img,\n ),\n # File path\n test_utils.FeatureExpectationItem(\n value=img_file_path,\n expected=img_file_expected_content,\n ),\n # 'img' shape can be dynamic\n test_utils.FeatureExpectationItem(\n value=img_other_shape,\n expected=img_other_shape,\n ),\n # Invalid type\n test_utils.FeatureExpectationItem(\n value=randint(256, size=(128, 128, 3), dtype=np.uint32),\n raise_cls=ValueError,\n raise_msg='should be uint8',\n ),\n # Invalid number of dimensions\n test_utils.FeatureExpectationItem(\n value=randint(256, size=(128, 128), dtype=np.uint8),\n raise_cls=ValueError,\n raise_msg='must have the same rank',\n ),\n # Invalid number of channels\n test_utils.FeatureExpectationItem(\n value=randint(256, size=(128, 128, 1), dtype=np.uint8),\n raise_cls=ValueError,\n raise_msg='are incompatible',\n ),\n ],\n ),\n # Image with statically defined shape\n test_utils.FeatureExpectation(\n name='image_shaped',\n feature=features_lib.Image(shape=(32, 64, 3)),\n shape=(32, 64, 3),\n dtype=tf.uint8,\n tests=[\n test_utils.FeatureExpectationItem(\n value=img_shaped,\n expected=img_shaped,\n ),\n # 'img_shaped' shape should be static\n test_utils.FeatureExpectationItem(\n value=randint(256, size=(31, 64, 3), dtype=np.uint8),\n raise_cls=ValueError,\n raise_msg='are incompatible',\n ),\n ],\n ),\n ]\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.test.main" ] ]
brando90/Does-MAML-Only-Work-via-Feature-Re-use-A-Data-Set-Centric-Perspective
[ "45c4fabf35d6d8d19e49092e84e8ac9fa55aee8d" ]
[ "maml_vs_adapted_maml_src/models/resnet_rfs.py" ]
[ "import torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nfrom torch.distributions import Bernoulli\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass SELayer(nn.Module):\n def __init__(self, channel, reduction=16):\n super(SELayer, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Sequential(\n nn.Linear(channel, channel // reduction),\n nn.ReLU(inplace=True),\n nn.Linear(channel // reduction, channel),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n b, c, _, _ = x.size()\n y = self.avg_pool(x).view(b, c)\n y = self.fc(y).view(b, c, 1, 1)\n return x * y\n\n\nclass DropBlock(nn.Module):\n def __init__(self, block_size):\n super(DropBlock, self).__init__()\n\n self.block_size = block_size\n # self.gamma = gamma\n # self.bernouli = Bernoulli(gamma)\n\n def forward(self, x, gamma):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n # shape: (bsize, channels, height, width)\n\n if self.training:\n batch_size, channels, height, width = x.shape\n\n bernoulli = Bernoulli(gamma)\n mask = bernoulli.sample(\n (batch_size, channels, height - (self.block_size - 1), width - (self.block_size - 1))).to(device)\n block_mask = self._compute_block_mask(mask)\n countM = block_mask.size()[0] * block_mask.size()[1] * block_mask.size()[2] * block_mask.size()[3]\n count_ones = block_mask.sum()\n\n return block_mask * x * (countM / count_ones)\n else:\n return x\n\n def _compute_block_mask(self, mask):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n left_padding = int((self.block_size - 1) / 2)\n right_padding = int(self.block_size / 2)\n\n batch_size, channels, height, width = mask.shape\n # print (\"mask\", mask[0][0])\n non_zero_idxs = mask.nonzero()\n nr_blocks = non_zero_idxs.shape[0]\n\n offsets = torch.stack(\n [\n torch.arange(self.block_size).view(-1, 1).expand(self.block_size, self.block_size).reshape(-1),\n # - left_padding,\n torch.arange(self.block_size).repeat(self.block_size), # - left_padding\n ]\n ).t().to(device)\n offsets = torch.cat((torch.zeros(self.block_size ** 2, 2).to(device).long(), offsets.long()), 1)\n\n if nr_blocks > 0:\n non_zero_idxs = non_zero_idxs.repeat(self.block_size ** 2, 1)\n offsets = offsets.repeat(nr_blocks, 1).view(-1, 4)\n offsets = offsets.long()\n\n block_idxs = non_zero_idxs + offsets\n # block_idxs += left_padding\n padded_mask = F.pad(mask, (left_padding, right_padding, left_padding, right_padding))\n padded_mask[block_idxs[:, 0], block_idxs[:, 1], block_idxs[:, 2], block_idxs[:, 3]] = 1.\n else:\n padded_mask = F.pad(mask, (left_padding, right_padding, left_padding, right_padding))\n\n block_mask = 1 - padded_mask # [:height, :width]\n return block_mask\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, drop_rate=0.0, drop_block=False,\n block_size=1, use_se=False):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.LeakyReLU(0.1)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = conv3x3(planes, planes)\n self.bn3 = nn.BatchNorm2d(planes)\n self.maxpool = nn.MaxPool2d(stride)\n self.downsample = downsample\n self.stride = stride\n self.drop_rate = drop_rate\n self.num_batches_tracked = 0\n self.drop_block = drop_block\n self.block_size = 
block_size\n self.DropBlock = DropBlock(block_size=self.block_size)\n self.use_se = use_se\n if self.use_se:\n self.se = SELayer(planes, 4)\n\n def forward(self, x):\n self.num_batches_tracked += 1\n\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n if self.use_se:\n out = self.se(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n out += residual\n out = self.relu(out)\n out = self.maxpool(out)\n\n if self.drop_rate > 0:\n if self.drop_block == True:\n feat_size = out.size()[2]\n keep_rate = max(1.0 - self.drop_rate / (20 * 2000) * (self.num_batches_tracked), 1.0 - self.drop_rate)\n gamma = (1 - keep_rate) / self.block_size ** 2 * feat_size ** 2 / (feat_size - self.block_size + 1) ** 2\n out = self.DropBlock(out, gamma=gamma)\n else:\n out = F.dropout(out, p=self.drop_rate, training=self.training, inplace=True)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, n_blocks, keep_prob=1.0, avg_pool=False, drop_rate=0.0,\n dropblock_size=5, num_classes=-1, use_se=False):\n super(ResNet, self).__init__()\n\n self.inplanes = 3\n self.use_se = use_se\n self.layer1 = self._make_layer(block, n_blocks[0], 64,\n stride=2, drop_rate=drop_rate)\n self.layer2 = self._make_layer(block, n_blocks[1], 160,\n stride=2, drop_rate=drop_rate)\n self.layer3 = self._make_layer(block, n_blocks[2], 320,\n stride=2, drop_rate=drop_rate, drop_block=True, block_size=dropblock_size)\n self.layer4 = self._make_layer(block, n_blocks[3], 640,\n stride=2, drop_rate=drop_rate, drop_block=True, block_size=dropblock_size)\n if avg_pool:\n # self.avgpool = nn.AvgPool2d(5, stride=1)\n self.avgpool = nn.AdaptiveAvgPool2d(1)\n self.keep_prob = keep_prob\n self.keep_avg_pool = avg_pool\n self.dropout = nn.Dropout(p=1 - self.keep_prob, inplace=False)\n self.drop_rate = drop_rate\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n self.num_classes = num_classes\n if self.num_classes > 0:\n self.classifier = nn.Linear(640, self.num_classes)\n\n def _make_layer(self, block, n_block, planes, stride=1, drop_rate=0.0, drop_block=False, block_size=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=1, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n if n_block == 1:\n layer = block(self.inplanes, planes, stride, downsample, drop_rate, drop_block, block_size, self.use_se)\n else:\n layer = block(self.inplanes, planes, stride, downsample, drop_rate, self.use_se)\n layers.append(layer)\n self.inplanes = planes * block.expansion\n\n for i in range(1, n_block):\n if i == n_block - 1:\n layer = block(self.inplanes, planes, drop_rate=drop_rate, drop_block=drop_block,\n block_size=block_size, use_se=self.use_se)\n else:\n layer = block(self.inplanes, planes, drop_rate=drop_rate, use_se=self.use_se)\n layers.append(layer)\n\n return nn.Sequential(*layers)\n\n def forward(self, x, is_feat=False):\n x = self.layer1(x)\n f0 = x\n x = self.layer2(x)\n f1 = x\n x = self.layer3(x)\n f2 = x\n x = self.layer4(x)\n f3 = x\n if self.keep_avg_pool:\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n 
feat = x\n if self.num_classes > 0:\n x = self.classifier(x)\n\n if is_feat:\n return [f0, f1, f2, f3, feat], x\n else:\n return x\n\n def get_embedding(self, x):\n [f0, f1, f2, f3, feat], x = self.forward(x, is_feat=True)\n return feat\n\n\ndef resnet12(keep_prob=1.0, avg_pool=False, **kwargs):\n \"\"\"Constructs a ResNet-12 model.\n \"\"\"\n model = ResNet(BasicBlock, [1, 1, 1, 1], keep_prob=keep_prob, avg_pool=avg_pool, **kwargs)\n return model\n\n\ndef resnet18(keep_prob=1.0, avg_pool=False, **kwargs):\n \"\"\"Constructs a ResNet-18 model.\n \"\"\"\n model = ResNet(BasicBlock, [1, 1, 2, 2], keep_prob=keep_prob, avg_pool=avg_pool, **kwargs)\n return model\n\n\ndef resnet24(keep_prob=1.0, avg_pool=False, **kwargs):\n \"\"\"Constructs a ResNet-24 model.\n \"\"\"\n model = ResNet(BasicBlock, [2, 2, 2, 2], keep_prob=keep_prob, avg_pool=avg_pool, **kwargs)\n return model\n\n\ndef resnet50(keep_prob=1.0, avg_pool=False, **kwargs):\n \"\"\"Constructs a ResNet-50 model.\n indeed, only (3 + 4 + 6 + 3) * 3 + 1 = 49 layers\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 6, 3], keep_prob=keep_prob, avg_pool=avg_pool, **kwargs)\n return model\n\n\ndef resnet101(keep_prob=1.0, avg_pool=False, **kwargs):\n \"\"\"Constructs a ResNet-101 model.\n indeed, only (3 + 4 + 23 + 3) * 3 + 1 = 100 layers\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 23, 3], keep_prob=keep_prob, avg_pool=avg_pool, **kwargs)\n return model\n\n\ndef seresnet12(keep_prob=1.0, avg_pool=False, **kwargs):\n \"\"\"Constructs a ResNet-12 model.\n \"\"\"\n model = ResNet(BasicBlock, [1, 1, 1, 1], keep_prob=keep_prob, avg_pool=avg_pool, use_se=True, **kwargs)\n return model\n\n\ndef seresnet18(keep_prob=1.0, avg_pool=False, **kwargs):\n \"\"\"Constructs a ResNet-18 model.\n \"\"\"\n model = ResNet(BasicBlock, [1, 1, 2, 2], keep_prob=keep_prob, avg_pool=avg_pool, use_se=True, **kwargs)\n return model\n\n\ndef seresnet24(keep_prob=1.0, avg_pool=False, **kwargs):\n \"\"\"Constructs a ResNet-24 model.\n \"\"\"\n model = ResNet(BasicBlock, [2, 2, 2, 2], keep_prob=keep_prob, avg_pool=avg_pool, use_se=True, **kwargs)\n return model\n\n\ndef seresnet50(keep_prob=1.0, avg_pool=False, **kwargs):\n \"\"\"Constructs a ResNet-50 model.\n indeed, only (3 + 4 + 6 + 3) * 3 + 1 = 49 layers\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 6, 3], keep_prob=keep_prob, avg_pool=avg_pool, use_se=True, **kwargs)\n return model\n\n\ndef seresnet101(keep_prob=1.0, avg_pool=False, **kwargs):\n \"\"\"Constructs a ResNet-101 model.\n indeed, only (3 + 4 + 23 + 3) * 3 + 1 = 100 layers\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 23, 3], keep_prob=keep_prob, avg_pool=avg_pool, use_se=True, **kwargs)\n return model\n\n\nif __name__ == '__main__':\n from types import SimpleNamespace\n # import argparse\n #\n # parser = argparse.ArgumentParser('argument for training')\n # parser.add_argument('--model', type=str, choices=['resnet12', 'resnet18', 'resnet24', 'resnet50', 'resnet101',\n # 'seresnet12', 'seresnet18', 'seresnet24', 'seresnet50',\n # 'seresnet101'])\n # args = parser.parse_args()\n args = SimpleNamespace(model='resnet12')\n args.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n model_dict = {\n 'resnet12': resnet12,\n 'resnet18': resnet18,\n 'resnet24': resnet24,\n 'resnet50': resnet50,\n 'resnet101': resnet101,\n 'seresnet12': seresnet12,\n 'seresnet18': seresnet18,\n 'seresnet24': seresnet24,\n 'seresnet50': seresnet50,\n 'seresnet101': seresnet101,\n }\n\n model = model_dict[args.model](avg_pool=True, drop_rate=0.1, dropblock_size=5, 
num_classes=64).to(args.device)\n data = torch.randn(2, 3, 84, 84)\n model = model.to(args.device)\n data = data.to(args.device)\n feat, logit = model(data, is_feat=True)\n print(feat[-1].shape)\n print(logit.shape)\n\n print(\"DONE\")\n" ]
[ [ "torch.cuda.is_available", "torch.nn.Conv2d", "torch.distributions.Bernoulli", "torch.nn.Sigmoid", "torch.nn.Dropout", "torch.nn.BatchNorm2d", "torch.nn.init.kaiming_normal_", "torch.nn.functional.dropout", "torch.randn", "torch.nn.functional.pad", "torch.arange", "torch.nn.MaxPool2d", "torch.nn.AdaptiveAvgPool2d", "torch.nn.Linear", "torch.nn.init.constant_", "torch.nn.Sequential", "torch.zeros", "torch.nn.ReLU", "torch.nn.LeakyReLU" ] ]
jwmueller/autogluon-benchmarking
[ "28f35188a65c5fb37d4950fa9657ea84c9163049" ]
[ "autogluon_utils/benchmarking/evaluation/tex_table.py" ]
[ "import pandas as pd\nimport numpy as np\n\n# Example usage: x = tex_table(df, textable_file=\"testlatextable.txt\", bold='min')\n\ndef tex_table(df, textable_file, bold = None, nan_char = \" x \", max_digits = 4):\n \"\"\" This function is only intended for fully numerical tables (dataset x frameworks comparison).\n Datasets should be row indices of df rather than a column.\n Args:\n df = DataFrame\n textable_file = path to output file\n bold = 'min' or = 'max' (if df only contains numbers), or = None for no bolding.\n nan_char replaces NaN in LaTex table\n max_digits = Maximum number of digits to show in each cell. \n \"\"\"\n if bold is not None:\n if bold == 'min':\n best_row_vals = df.min(axis=1)\n # best_cols = df.idxmin(axis=1)\n elif bold == 'max':\n best_row_vals = df.max(axis=1)\n # best_cols = df.idxmax(axis=0)\n else:\n raise ValueError(\"unknown bold option\")\n best_cols = []\n for i in df.index:\n row_best_cols = list(df.columns[np.abs(df.loc[i] - best_row_vals[i]) < 1e-5])\n best_cols.append(row_best_cols)\n if len(row_best_cols) <= 0:\n raise ValueError(\"no row value matches best row value\")\n \n # SHIFT_FACTOR = 100\n # df = df * SHIFT_FACTOR\n # df = df.round(num_decimals)\n # df = df / SHIFT_FACTOR\n max_int = int(df.max(numeric_only=True).max())\n max_digits = max(max_digits, len(str(max_int))) # make sure we don't truncate values before decimal\n df = df.astype('str')\n df = df.replace(\"nan\", nan_char)\n df = df.applymap(lambda x: x[:max_digits])\n \n print(df.columns)\n if bold is not None:\n ind = 0\n for i in df.index: # bold best value:\n if len(best_cols[ind]) > 0:\n for col_name in best_cols[ind]:\n df.at[i,col_name] = \"\\\\textbf{\" + df.at[i,col_name] + \"}\"\n ind += 1\n \n df.reset_index(inplace=True) # set dataset indices as first column\n df.rename(columns={'dataset':'Dataset'}, inplace=True)\n cols = list(df.columns)\n df.columns = ['\\\\textbf{'+col+'}' for col in cols]\n textab = df.to_latex(escape=True, index=False, column_format = 'l'+'c'*(len(df.columns)-1))\n textab = textab.replace(\"\\\\textbackslash textbf\", \"\\\\textbf\")\n textab = textab.replace(\"\\\\{\", \"{\")\n textab = textab.replace(\"\\\\}\", \"}\")\n \n with open(textable_file,'w') as tf:\n tf.write(textab)\n print(\"saved tex table to: %s\" % textable_file)\n return textab\n\n" ]
[ [ "numpy.abs" ] ]
peiyong86/FATE
[ "efae2b1add20d9f98ac05a669298e36369f91497" ]
[ "federatedml/linear_model/linear_model_weight.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#\n# Copyright 2019 The FATE Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport numpy as np\n\nfrom federatedml.framework.weights import ListWeights, TransferableWeights\n\n\nclass LinearModelWeights(ListWeights):\n def __init__(self, l, fit_intercept):\n super().__init__(l)\n self.fit_intercept = fit_intercept\n\n def for_remote(self):\n return TransferableWeights(self._weights, self.__class__, self.fit_intercept)\n\n @property\n def coef_(self):\n if self.fit_intercept:\n return np.array(self._weights[:-1])\n return np.array(self._weights)\n\n @property\n def intercept_(self):\n if self.fit_intercept:\n return self._weights[-1]\n return 0.0\n" ]
[ [ "numpy.array" ] ]
NoldAndreas/FINDER
[ "a3d947c5d59a7cd6e54400b0e9aeb9e111689976" ]
[ "Code/Geometry_Base.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 20 14:51:07 2020\n\n@author: andreas\n\"\"\"\n\nimport numpy as np\nfrom abc import abstractmethod\n#import pickle\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport scipy.spatial.distance as dist\nimport glob\n\n#Geometry base class\n\nclass Geometry_Base:\n \n @abstractmethod\n def __init__(self,basefolder,unitCluster_Library):\n \n #Values that have to be set in child classes:\n self.basefolder = basefolder;\n self.geometry_name = [];\n self.XC = [];\n self.N_clusters = [];\n self.N_Noise = [];\n self.labels_groundtruth = [];\n self.parameters = [];\n self.unitCluster_Library = unitCluster_Library;\n self.seed = [];\n\n self.__loadUnitClusters();\n \n #Methods that need to be set in child classes:\n @abstractmethod \n def GeneratePoints(self):\n yield None\n \n def __loadUnitClusters(self): \n data_template_clusters = [];\n folder_ = self.unitCluster_Library;\n\n filenamesList = glob.glob(self.basefolder +'TemplateClusters/'+ folder_+'/cluster_*.txt');\n for fn in filenamesList:\n XS_i = np.loadtxt(fn, comments=\"#\", delimiter=\" \", unpack=False);\n data_template_clusters.append(XS_i); \n print(\"Loaded \"+str(len(data_template_clusters))+\" template clusters..\");\n\n #In the input data, \n #1 unit corresponds to 158nm. We normalize such that 1 unit = 1 nanometer\n datascale = 158;\n for i,X_cl in enumerate(data_template_clusters):\n data_template_clusters[i] = datascale*(X_cl - np.mean(X_cl,axis=0));\n \n self.template_clusters = data_template_clusters;\n \n def PlotScatter(self,filename):\n \n labels = self.labels_groundtruth;\n XC = self.XC;\n \n plt.figure();\n mark = (labels==-1);\n sns.scatterplot(x=XC[mark,0],y=XC[mark,1],color='grey',alpha=0.2);\n mark = (labels>=0);\n sns.scatterplot(x=XC[mark,0],y=XC[mark,1],hue=labels[mark],palette='bright',legend=False);\n plt.axis('equal')\n plt.savefig(filename);\n \n def GetTypicalDiameter_of_templateClusters(self):\n D_ = 0; \n for cl in self.template_clusters:\n d = np.max(dist.pdist(cl));\n if(d>D_):\n D_ = d;\n return D_\n \n def GetTypical_Number_of_points_templateClusters(self):\n Ns = [len(cl) for cl in self.template_clusters]; \n \n return np.mean(np.asarray(Ns));" ]
[ [ "scipy.spatial.distance.pdist", "matplotlib.pyplot.figure", "matplotlib.pyplot.axis", "matplotlib.pyplot.savefig", "numpy.asarray", "numpy.mean", "numpy.loadtxt" ] ]
fzfs/Multi-view-Chest-X-ray-Classification
[ "156149829629586d5a8d7946fc710b3b2dec1020" ]
[ "Resnet.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers, view=4, num_classes=13, img_size = 224):\n\n self.inplanes = 64\n super(ResNet, self).__init__()\n c = 1\n if view == 4:\n c = 2\n \n self.conv1 = nn.Conv2d(c, 64, kernel_size=7, stride=2, padding=3,bias=False) \n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n self.avgpool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n \n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n x = self.avgpool(x)\n x = x.view(x.size(0), -1) \n x = self.fc(x)\n\n return x\n \nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, reduction=16, k_size=9):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out" ]
[ [ "torch.nn.MaxPool2d", "torch.nn.BatchNorm2d", "torch.nn.Linear", "torch.nn.AdaptiveAvgPool2d", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.nn.ReLU" ] ]
INF800/CLIP-rsicd
[ "80eb09a71b4ab8a4566eeaa72ec8890630c0d7ee" ]
[ "utils/data.py" ]
[ "import torch\nfrom torchvision.datasets import VisionDataset\nfrom torchvision.io import ImageReadMode, read_image\nfrom torchvision.transforms import (\n # added for image augmentation\n ToPILImage,\n RandomCrop,\n ColorJitter,\n RandomHorizontalFlip,\n RandomVerticalFlip,\n RandomResizedCrop,\n ToTensor,\n # /added for image augmentation\n CenterCrop, \n ConvertImageDtype, \n Normalize, \n Resize\n)\nfrom torchvision.transforms.functional import InterpolationMode\n\nimport jsonlines\nfrom pathlib import Path\nfrom typing import Optional, Callable\n\n\n# adopted form https://github.com/huggingface/transformers/blob/master/examples/research_projects/jax-projects/hybrid_clip/run_hybrid_clip.py\nclass Transform(torch.nn.Module):\n def __init__(self, image_size, augment_images, augmentation_args):\n super().__init__()\n if augment_images:\n crop_size = int(image_size * 0.8)\n self.transforms = torch.nn.Sequential(\n # image augmentation transforms\n RandomCrop(crop_size),\n ColorJitter(),\n RandomHorizontalFlip(augmentation_args.random_horizontal_flip),\n RandomVerticalFlip(augmentation_args.random_vertical_flip),\n RandomResizedCrop(crop_size, scale=(0.8, 1.2), ratio=(1.0, 1.0)),\n # /image augmentation transforms\n Resize([image_size], interpolation=InterpolationMode.BICUBIC),\n CenterCrop(image_size),\n ConvertImageDtype(torch.float),\n Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),\n )\n else:\n self.transforms = torch.nn.Sequential(\n Resize([image_size], interpolation=InterpolationMode.BICUBIC),\n CenterCrop(image_size),\n ConvertImageDtype(torch.float),\n Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n with torch.no_grad():\n x = self.transforms(x)\n return x\n\nclass ImageTextDataset(VisionDataset):\n \"\"\"\n Dtaset for loading image-text data for tasks like CLIP training, Image Captioning.\n\n Args:\n root: (string): The root path where the dataset is stored.\n The expected format is jsonlines where each line is a json object containing to keys.\n `filename`: The path to the image.\n `captions`: An `array` of captions.\n split: (string): Dataset split name. Is used for parsing jsonl files from `root` folder.\n captions_per_image: (int): number of captions per image to use. Defaults to 5.\n augment_captions: (bool): If true the jsonl files with `textaug_` prefix are selected from root\n folder. \n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.ToTensor``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n transforms (callable, optional): A function/transform that takes input sample and its target as entry\n and returns a transformed version.\n \"\"\"\n\n def __init__(\n self,\n root: str,\n split: str, \n captions_per_image:int = 5,\n augment_captions:bool = True,\n transform: Optional[Callable] = None,\n target_transform: Optional[Callable] = None,\n transforms: Optional[Callable] = None,\n ):\n super().__init__(root, transforms, transform, target_transform)\n self.root = root\n if augment_captions:\n prefix = \"textaug_\"\n else:\n prefix = \"\"\n filepaths = list(Path(root).glob(f\"{prefix}{split}*.jsonl\"))\n fps_empty_msg = f\"\"\"\\\n The `filepaths` is empty. 
Please make sure that `root` folder contains jsonl files\n named properly: [textaug_]{split}*.jsonl.\n `textaug_` prefix is expected if `augment_captions` is `True`.\n \"\"\"\n assert len(filepaths) > 0, fps_empty_msg\n \n self.captions = []\n self.image_paths = []\n for count, filepath in enumerate(filepaths):\n with jsonlines.open(filepath, \"r\") as reader:\n for example in reader:\n self.captions.extend(example[\"captions\"][:captions_per_image])\n self.image_paths.extend([example[\"filename\"]] * captions_per_image)\n print(f\"{count+1} input files for {split} split found\")\n \n def _load_image(self, idx: int):\n path = f\"{self.root}/{self.image_paths[idx]}\"\n return read_image(path, mode=ImageReadMode.RGB)\n\n def _load_target(self, idx):\n return self.captions[idx]\n\n def __getitem__(self, index: int):\n image = self._load_image(index)\n target = self._load_target(index)\n\n if self.transforms is not None:\n image, target = self.transforms(image, target)\n\n return image, target\n\n def __len__(self) -> int:\n return len(self.captions)" ]
[ [ "torch.no_grad" ] ]
google-research/DBAP-algorithm
[ "545a4e780f9d9d480c96b67e7a8ae590a983db6b" ]
[ "third_party/rlkit_library/rlkit/torch/skewfit/video_gen.py" ]
[ "# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport os.path as osp\n\nimport uuid\nfrom rlkit.envs.vae_wrapper import VAEWrappedEnv\n\nfilename = str(uuid.uuid4())\n\nimport skvideo.io\nimport numpy as np\nimport time\n\nimport scipy.misc\n\ndef add_border(img, pad_length, pad_color, imsize=84):\n H = 3*imsize\n W = imsize\n img = img.reshape((3*imsize, imsize, -1))\n img2 = np.ones((H + 2 * pad_length, W + 2 * pad_length, img.shape[2]), dtype=np.uint8) * pad_color\n img2[pad_length:-pad_length, pad_length:-pad_length, :] = img\n return img2\n\n\ndef get_image(goal, obs, recon_obs, imsize=84, pad_length=1, pad_color=255):\n if len(goal.shape) == 1:\n goal = goal.reshape(-1, imsize, imsize).transpose(2, 1, 0)\n obs = obs.reshape(-1, imsize, imsize).transpose(2,1,0)\n recon_obs = recon_obs.reshape(-1, imsize, imsize).transpose(2,1,0)\n img = np.concatenate((goal, obs, recon_obs))\n img = np.uint8(255 * img)\n if pad_length > 0:\n img = add_border(img, pad_length, pad_color)\n return img\n\n\ndef dump_video(\n env,\n policy,\n filename,\n rollout_function,\n rows=3,\n columns=6,\n pad_length=0,\n pad_color=255,\n do_timer=True,\n horizon=100,\n dirname_to_save_images=None,\n subdirname=\"rollouts\",\n imsize=84,\n):\n # num_channels = env.vae.input_channels\n num_channels = 1 if env.grayscale else 3\n frames = []\n H = 3*imsize\n W=imsize\n N = rows * columns\n for i in range(N):\n start = time.time()\n path = rollout_function(\n env,\n policy,\n max_path_length=horizon,\n render=False,\n )\n is_vae_env = isinstance(env, VAEWrappedEnv)\n l = []\n for d in path['full_observations']:\n if is_vae_env:\n recon = np.clip(env._reconstruct_img(d['image_observation']), 0, 1)\n else:\n recon = d['image_observation']\n l.append(\n get_image(\n d['image_desired_goal'],\n d['image_observation'],\n recon,\n pad_length=pad_length,\n pad_color=pad_color,\n imsize=imsize,\n )\n )\n frames += l\n\n if dirname_to_save_images:\n rollout_dir = osp.join(dirname_to_save_images, subdirname, str(i))\n os.makedirs(rollout_dir, exist_ok=True)\n rollout_frames = frames[-101:]\n goal_img = np.flip(rollout_frames[0][:imsize, :imsize, :], 0)\n scipy.misc.imsave(rollout_dir+\"/goal.png\", goal_img)\n goal_img = np.flip(rollout_frames[1][:imsize, :imsize, :], 0)\n scipy.misc.imsave(rollout_dir+\"/z_goal.png\", goal_img)\n for j in range(0, 101, 1):\n img = np.flip(rollout_frames[j][imsize:, :imsize, :], 0)\n scipy.misc.imsave(rollout_dir+\"/\"+str(j)+\".png\", img)\n if do_timer:\n print(i, time.time() - start)\n\n frames = np.array(frames, dtype=np.uint8)\n path_length = frames.size // (\n N * (H + 2*pad_length) * (W + 2*pad_length) * num_channels\n )\n frames = np.array(frames, dtype=np.uint8).reshape(\n (N, path_length, H + 2 * pad_length, W + 2 * pad_length, num_channels)\n )\n f1 = []\n for k1 in range(columns):\n f2 = []\n for k2 in range(rows):\n k = k1 * rows + k2\n f2.append(frames[k:k+1, :, :, :, :].reshape(\n (path_length, H + 2 * pad_length, W + 2 * pad_length, 
num_channels)\n ))\n f1.append(np.concatenate(f2, axis=1))\n outputdata = np.concatenate(f1, axis=2)\n skvideo.io.vwrite(filename, outputdata)\n print(\"Saved video to \", filename)\n" ]
[ [ "numpy.ones", "numpy.flip", "numpy.array", "numpy.concatenate", "numpy.uint8" ] ]
Ennosigaeon/scipy
[ "2d872f7cf2098031b9be863ec25e366a550b229c" ]
[ "scipy/sparse/bsr.py" ]
[ "\"\"\"Compressed Block Sparse Row matrix format\"\"\"\n\n__docformat__ = \"restructuredtext en\"\n\n__all__ = ['bsr_matrix', 'isspmatrix_bsr']\n\nfrom warnings import warn\n\nimport numpy as np\n\nfrom .data import _data_matrix, _minmax_mixin\nfrom .compressed import _cs_matrix\nfrom .base import isspmatrix, _formats, spmatrix\nfrom .sputils import (isshape, getdtype, getdata, to_native, upcast,\n get_index_dtype, check_shape)\nfrom . import _sparsetools\nfrom ._sparsetools import (bsr_matvec, bsr_matvecs, csr_matmat_maxnnz,\n bsr_matmat, bsr_transpose, bsr_sort_indices,\n bsr_tocsr)\n\n\nclass bsr_matrix(_cs_matrix, _minmax_mixin):\n \"\"\"Block Sparse Row matrix\n\n This can be instantiated in several ways:\n bsr_matrix(D, [blocksize=(R,C)])\n where D is a dense matrix or 2-D ndarray.\n\n bsr_matrix(S, [blocksize=(R,C)])\n with another sparse matrix S (equivalent to S.tobsr())\n\n bsr_matrix((M, N), [blocksize=(R,C), dtype])\n to construct an empty matrix with shape (M, N)\n dtype is optional, defaulting to dtype='d'.\n\n bsr_matrix((data, ij), [blocksize=(R,C), shape=(M, N)])\n where ``data`` and ``ij`` satisfy ``a[ij[0, k], ij[1, k]] = data[k]``\n\n bsr_matrix((data, indices, indptr), [shape=(M, N)])\n is the standard BSR representation where the block column\n indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]``\n and their corresponding block values are stored in\n ``data[ indptr[i]: indptr[i+1] ]``. If the shape parameter is not\n supplied, the matrix dimensions are inferred from the index arrays.\n\n Attributes\n ----------\n dtype : dtype\n Data type of the matrix\n shape : 2-tuple\n Shape of the matrix\n ndim : int\n Number of dimensions (this is always 2)\n nnz\n Number of stored values, including explicit zeros\n data\n Data array of the matrix\n indices\n BSR format index array\n indptr\n BSR format index pointer array\n blocksize\n Block size of the matrix\n has_sorted_indices\n Whether indices are sorted\n\n Notes\n -----\n Sparse matrices can be used in arithmetic operations: they support\n addition, subtraction, multiplication, division, and matrix power.\n\n **Summary of BSR format**\n\n The Block Compressed Row (BSR) format is very similar to the Compressed\n Sparse Row (CSR) format. BSR is appropriate for sparse matrices with dense\n sub matrices like the last example below. Block matrices often arise in\n vector-valued finite element discretizations. 
In such cases, BSR is\n considerably more efficient than CSR and CSC for many sparse arithmetic\n operations.\n\n **Blocksize**\n\n The blocksize (R,C) must evenly divide the shape of the matrix (M,N).\n That is, R and C must satisfy the relationship ``M % R = 0`` and\n ``N % C = 0``.\n\n If no blocksize is specified, a simple heuristic is applied to determine\n an appropriate blocksize.\n\n Examples\n --------\n >>> from scipy.sparse import bsr_matrix\n >>> bsr_matrix((3, 4), dtype=np.int8).toarray()\n array([[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]], dtype=int8)\n\n >>> row = np.array([0, 0, 1, 2, 2, 2])\n >>> col = np.array([0, 2, 2, 0, 1, 2])\n >>> data = np.array([1, 2, 3 ,4, 5, 6])\n >>> bsr_matrix((data, (row, col)), shape=(3, 3)).toarray()\n array([[1, 0, 2],\n [0, 0, 3],\n [4, 5, 6]])\n\n >>> indptr = np.array([0, 2, 3, 6])\n >>> indices = np.array([0, 2, 2, 0, 1, 2])\n >>> data = np.array([1, 2, 3, 4, 5, 6]).repeat(4).reshape(6, 2, 2)\n >>> bsr_matrix((data,indices,indptr), shape=(6, 6)).toarray()\n array([[1, 1, 0, 0, 2, 2],\n [1, 1, 0, 0, 2, 2],\n [0, 0, 0, 0, 3, 3],\n [0, 0, 0, 0, 3, 3],\n [4, 4, 5, 5, 6, 6],\n [4, 4, 5, 5, 6, 6]])\n\n \"\"\"\n format = 'bsr'\n\n def __init__(self, arg1, shape=None, dtype=None, copy=False, blocksize=None):\n _data_matrix.__init__(self)\n\n if isspmatrix(arg1):\n if isspmatrix_bsr(arg1) and copy:\n arg1 = arg1.copy()\n else:\n arg1 = arg1.tobsr(blocksize=blocksize)\n self._set_self(arg1)\n\n elif isinstance(arg1,tuple):\n if isshape(arg1):\n # it's a tuple of matrix dimensions (M,N)\n self._shape = check_shape(arg1)\n M,N = self.shape\n # process blocksize\n if blocksize is None:\n blocksize = (1,1)\n else:\n if not isshape(blocksize):\n raise ValueError('invalid blocksize=%s' % blocksize)\n blocksize = tuple(blocksize)\n self.data = np.zeros((0,) + blocksize, getdtype(dtype, default=float))\n\n R,C = blocksize\n if (M % R) != 0 or (N % C) != 0:\n raise ValueError('shape must be multiple of blocksize')\n\n # Select index dtype large enough to pass array and\n # scalar parameters to sparsetools\n idx_dtype = get_index_dtype(maxval=max(M//R, N//C, R, C))\n self.indices = np.zeros(0, dtype=idx_dtype)\n self.indptr = np.zeros(M//R + 1, dtype=idx_dtype)\n\n elif len(arg1) == 2:\n # (data,(row,col)) format\n from .coo import coo_matrix\n self._set_self(\n coo_matrix(arg1, dtype=dtype, shape=shape).tobsr(\n blocksize=blocksize\n )\n )\n\n elif len(arg1) == 3:\n # (data,indices,indptr) format\n (data, indices, indptr) = arg1\n\n # Select index dtype large enough to pass array and\n # scalar parameters to sparsetools\n maxval = 1\n if shape is not None:\n maxval = max(shape)\n if blocksize is not None:\n maxval = max(maxval, max(blocksize))\n idx_dtype = get_index_dtype((indices, indptr), maxval=maxval,\n check_contents=True)\n self.indices = np.array(indices, copy=copy, dtype=idx_dtype)\n self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype)\n self.data = getdata(data, copy=copy, dtype=dtype)\n if self.data.ndim != 3:\n raise ValueError(\n 'BSR data must be 3-dimensional, got shape=%s' % (\n self.data.shape,))\n if blocksize is not None:\n if not isshape(blocksize):\n raise ValueError('invalid blocksize=%s' % (blocksize,))\n if tuple(blocksize) != self.data.shape[1:]:\n raise ValueError('mismatching blocksize=%s vs %s' % (\n blocksize, self.data.shape[1:]))\n else:\n raise ValueError('unrecognized bsr_matrix constructor usage')\n else:\n # must be dense\n try:\n arg1 = np.asarray(arg1)\n except Exception as e:\n raise ValueError(\"unrecognized 
form for\"\n \" %s_matrix constructor\" % self.format) from e\n from .coo import coo_matrix\n arg1 = coo_matrix(arg1, dtype=dtype).tobsr(blocksize=blocksize)\n self._set_self(arg1)\n\n if shape is not None:\n self._shape = check_shape(shape)\n else:\n if self.shape is None:\n # shape not already set, try to infer dimensions\n try:\n M = len(self.indptr) - 1\n N = self.indices.max() + 1\n except Exception as e:\n raise ValueError('unable to infer matrix dimensions') from e\n else:\n R,C = self.blocksize\n self._shape = check_shape((M*R,N*C))\n\n if self.shape is None:\n if shape is None:\n # TODO infer shape here\n raise ValueError('need to infer shape')\n else:\n self._shape = check_shape(shape)\n\n if dtype is not None:\n self.data = self.data.astype(dtype, copy=False)\n\n self.check_format(full_check=False)\n\n def check_format(self, full_check=True):\n \"\"\"check whether the matrix format is valid\n\n *Parameters*:\n full_check:\n True - rigorous check, O(N) operations : default\n False - basic check, O(1) operations\n\n \"\"\"\n M,N = self.shape\n R,C = self.blocksize\n\n # index arrays should have integer data types\n if self.indptr.dtype.kind != 'i':\n warn(\"indptr array has non-integer dtype (%s)\"\n % self.indptr.dtype.name)\n if self.indices.dtype.kind != 'i':\n warn(\"indices array has non-integer dtype (%s)\"\n % self.indices.dtype.name)\n\n idx_dtype = get_index_dtype((self.indices, self.indptr))\n self.indptr = np.asarray(self.indptr, dtype=idx_dtype)\n self.indices = np.asarray(self.indices, dtype=idx_dtype)\n self.data = to_native(self.data)\n\n # check array shapes\n if self.indices.ndim != 1 or self.indptr.ndim != 1:\n raise ValueError(\"indices, and indptr should be 1-D\")\n if self.data.ndim != 3:\n raise ValueError(\"data should be 3-D\")\n\n # check index pointer\n if (len(self.indptr) != M//R + 1):\n raise ValueError(\"index pointer size (%d) should be (%d)\" %\n (len(self.indptr), M//R + 1))\n if (self.indptr[0] != 0):\n raise ValueError(\"index pointer should start with 0\")\n\n # check index and data arrays\n if (len(self.indices) != len(self.data)):\n raise ValueError(\"indices and data should have the same size\")\n if (self.indptr[-1] > len(self.indices)):\n raise ValueError(\"Last value of index pointer should be less than \"\n \"the size of index and data arrays\")\n\n self.prune()\n\n if full_check:\n # check format validity (more expensive)\n if self.nnz > 0:\n if self.indices.max() >= N//C:\n raise ValueError(\"column index values must be < %d (now max %d)\" % (N//C, self.indices.max()))\n if self.indices.min() < 0:\n raise ValueError(\"column index values must be >= 0\")\n if np.diff(self.indptr).min() < 0:\n raise ValueError(\"index pointer values must form a \"\n \"non-decreasing sequence\")\n\n # if not self.has_sorted_indices():\n # warn('Indices were not in sorted order. 
Sorting indices.')\n # self.sort_indices(check_first=False)\n\n def _get_blocksize(self):\n return self.data.shape[1:]\n blocksize = property(fget=_get_blocksize)\n\n def getnnz(self, axis=None):\n if axis is not None:\n raise NotImplementedError(\"getnnz over an axis is not implemented \"\n \"for BSR format\")\n R,C = self.blocksize\n return int(self.indptr[-1] * R * C)\n\n getnnz.__doc__ = spmatrix.getnnz.__doc__\n\n def __repr__(self):\n format = _formats[self.getformat()][1]\n return (\"<%dx%d sparse matrix of type '%s'\\n\"\n \"\\twith %d stored elements (blocksize = %dx%d) in %s format>\" %\n (self.shape + (self.dtype.type, self.nnz) + self.blocksize +\n (format,)))\n\n def diagonal(self, k=0):\n rows, cols = self.shape\n if k <= -rows or k >= cols:\n return np.empty(0, dtype=self.data.dtype)\n R, C = self.blocksize\n y = np.zeros(min(rows + min(k, 0), cols - max(k, 0)),\n dtype=upcast(self.dtype))\n _sparsetools.bsr_diagonal(k, rows // R, cols // C, R, C,\n self.indptr, self.indices,\n np.ravel(self.data), y)\n return y\n\n diagonal.__doc__ = spmatrix.diagonal.__doc__\n\n ##########################\n # NotImplemented methods #\n ##########################\n\n def __getitem__(self,key):\n raise NotImplementedError\n\n def __setitem__(self,key,val):\n raise NotImplementedError\n\n ######################\n # Arithmetic methods #\n ######################\n\n @np.deprecate(message=\"BSR matvec is deprecated in SciPy 0.19.0. \"\n \"Use * operator instead.\")\n def matvec(self, other):\n \"\"\"Multiply matrix by vector.\"\"\"\n return self * other\n\n @np.deprecate(message=\"BSR matmat is deprecated in SciPy 0.19.0. \"\n \"Use * operator instead.\")\n def matmat(self, other):\n \"\"\"Multiply this sparse matrix by other matrix.\"\"\"\n return self * other\n\n def _add_dense(self, other):\n return self.tocoo(copy=False)._add_dense(other)\n\n def _mul_vector(self, other):\n M,N = self.shape\n R,C = self.blocksize\n\n result = np.zeros(self.shape[0], dtype=upcast(self.dtype, other.dtype))\n\n bsr_matvec(M//R, N//C, R, C,\n self.indptr, self.indices, self.data.ravel(),\n other, result)\n\n return result\n\n def _mul_multivector(self,other):\n R,C = self.blocksize\n M,N = self.shape\n n_vecs = other.shape[1] # number of column vectors\n\n result = np.zeros((M,n_vecs), dtype=upcast(self.dtype,other.dtype))\n\n bsr_matvecs(M//R, N//C, n_vecs, R, C,\n self.indptr, self.indices, self.data.ravel(),\n other.ravel(), result.ravel())\n\n return result\n\n def _mul_sparse_matrix(self, other):\n M, K1 = self.shape\n K2, N = other.shape\n\n R,n = self.blocksize\n\n # convert to this format\n if isspmatrix_bsr(other):\n C = other.blocksize[1]\n else:\n C = 1\n\n from .csr import isspmatrix_csr\n\n if isspmatrix_csr(other) and n == 1:\n other = other.tobsr(blocksize=(n,C), copy=False) # lightweight conversion\n else:\n other = other.tobsr(blocksize=(n,C))\n\n idx_dtype = get_index_dtype((self.indptr, self.indices,\n other.indptr, other.indices))\n\n bnnz = csr_matmat_maxnnz(M//R, N//C,\n self.indptr.astype(idx_dtype),\n self.indices.astype(idx_dtype),\n other.indptr.astype(idx_dtype),\n other.indices.astype(idx_dtype))\n\n idx_dtype = get_index_dtype((self.indptr, self.indices,\n other.indptr, other.indices),\n maxval=bnnz)\n indptr = np.empty(self.indptr.shape, dtype=idx_dtype)\n indices = np.empty(bnnz, dtype=idx_dtype)\n data = np.empty(R*C*bnnz, dtype=upcast(self.dtype,other.dtype))\n\n bsr_matmat(bnnz, M//R, N//C, R, C, n,\n self.indptr.astype(idx_dtype),\n self.indices.astype(idx_dtype),\n 
np.ravel(self.data),\n other.indptr.astype(idx_dtype),\n other.indices.astype(idx_dtype),\n np.ravel(other.data),\n indptr,\n indices,\n data)\n\n data = data.reshape(-1,R,C)\n\n # TODO eliminate zeros\n\n return bsr_matrix((data,indices,indptr),shape=(M,N),blocksize=(R,C))\n\n ######################\n # Conversion methods #\n ######################\n\n def tobsr(self, blocksize=None, copy=False):\n \"\"\"Convert this matrix into Block Sparse Row Format.\n\n With copy=False, the data/indices may be shared between this\n matrix and the resultant bsr_matrix.\n\n If blocksize=(R, C) is provided, it will be used for determining\n block size of the bsr_matrix.\n \"\"\"\n if blocksize not in [None, self.blocksize]:\n return self.tocsr().tobsr(blocksize=blocksize)\n if copy:\n return self.copy()\n else:\n return self\n\n def tocsr(self, copy=False):\n M, N = self.shape\n R, C = self.blocksize\n nnz = self.nnz\n idx_dtype = get_index_dtype((self.indptr, self.indices),\n maxval=max(nnz, N))\n indptr = np.empty(M + 1, dtype=idx_dtype)\n indices = np.empty(nnz, dtype=idx_dtype)\n data = np.empty(nnz, dtype=upcast(self.dtype))\n\n bsr_tocsr(M // R, # n_brow\n N // C, # n_bcol\n R, C,\n self.indptr.astype(idx_dtype, copy=False),\n self.indices.astype(idx_dtype, copy=False),\n self.data,\n indptr,\n indices,\n data)\n from .csr import csr_matrix\n return csr_matrix((data, indices, indptr), shape=self.shape)\n\n tocsr.__doc__ = spmatrix.tocsr.__doc__\n\n def tocsc(self, copy=False):\n return self.tocsr(copy=False).tocsc(copy=copy)\n\n tocsc.__doc__ = spmatrix.tocsc.__doc__\n\n def tocoo(self, copy=True):\n \"\"\"Convert this matrix to COOrdinate format.\n\n When copy=False the data array will be shared between\n this matrix and the resultant coo_matrix.\n \"\"\"\n\n M,N = self.shape\n R,C = self.blocksize\n\n indptr_diff = np.diff(self.indptr)\n if indptr_diff.dtype.itemsize > np.dtype(np.intp).itemsize:\n # Check for potential overflow\n indptr_diff_limited = indptr_diff.astype(np.intp)\n if np.any(indptr_diff_limited != indptr_diff):\n raise ValueError(\"Matrix too big to convert\")\n indptr_diff = indptr_diff_limited\n\n row = (R * np.arange(M//R)).repeat(indptr_diff)\n row = row.repeat(R*C).reshape(-1,R,C)\n row += np.tile(np.arange(R).reshape(-1,1), (1,C))\n row = row.reshape(-1)\n\n col = (C * self.indices).repeat(R*C).reshape(-1,R,C)\n col += np.tile(np.arange(C), (R,1))\n col = col.reshape(-1)\n\n data = self.data.reshape(-1)\n\n if copy:\n data = data.copy()\n\n from .coo import coo_matrix\n return coo_matrix((data,(row,col)), shape=self.shape)\n\n def toarray(self, order=None, out=None):\n return self.tocoo(copy=False).toarray(order=order, out=out)\n\n toarray.__doc__ = spmatrix.toarray.__doc__\n\n def transpose(self, axes=None, copy=False):\n if axes is not None:\n raise ValueError((\"Sparse matrices do not support \"\n \"an 'axes' parameter because swapping \"\n \"dimensions is the only logical permutation.\"))\n\n R, C = self.blocksize\n M, N = self.shape\n NBLK = self.nnz//(R*C)\n\n if self.nnz == 0:\n return bsr_matrix((N, M), blocksize=(C, R),\n dtype=self.dtype, copy=copy)\n\n indptr = np.empty(N//C + 1, dtype=self.indptr.dtype)\n indices = np.empty(NBLK, dtype=self.indices.dtype)\n data = np.empty((NBLK, C, R), dtype=self.data.dtype)\n\n bsr_transpose(M//R, N//C, R, C,\n self.indptr, self.indices, self.data.ravel(),\n indptr, indices, data.ravel())\n\n return bsr_matrix((data, indices, indptr),\n shape=(N, M), copy=copy)\n\n transpose.__doc__ = spmatrix.transpose.__doc__\n\n 
##############################################################\n # methods that examine or modify the internal data structure #\n ##############################################################\n\n def eliminate_zeros(self):\n \"\"\"Remove zero elements in-place.\"\"\"\n\n if not self.nnz:\n return # nothing to do\n\n R,C = self.blocksize\n M,N = self.shape\n\n mask = (self.data != 0).reshape(-1,R*C).sum(axis=1) # nonzero blocks\n\n nonzero_blocks = mask.nonzero()[0]\n\n self.data[:len(nonzero_blocks)] = self.data[nonzero_blocks]\n\n # modifies self.indptr and self.indices *in place*\n _sparsetools.csr_eliminate_zeros(M//R, N//C, self.indptr,\n self.indices, mask)\n self.prune()\n\n def sum_duplicates(self):\n \"\"\"Eliminate duplicate matrix entries by adding them together\n\n The is an *in place* operation\n \"\"\"\n if self.has_canonical_format:\n return\n self.sort_indices()\n R, C = self.blocksize\n M, N = self.shape\n\n # port of _sparsetools.csr_sum_duplicates\n n_row = M // R\n nnz = 0\n row_end = 0\n for i in range(n_row):\n jj = row_end\n row_end = self.indptr[i+1]\n while jj < row_end:\n j = self.indices[jj]\n x = self.data[jj]\n jj += 1\n while jj < row_end and self.indices[jj] == j:\n x += self.data[jj]\n jj += 1\n self.indices[nnz] = j\n self.data[nnz] = x\n nnz += 1\n self.indptr[i+1] = nnz\n\n self.prune() # nnz may have changed\n self.has_canonical_format = True\n\n def sort_indices(self):\n \"\"\"Sort the indices of this matrix *in place*\n \"\"\"\n if self.has_sorted_indices:\n return\n\n R,C = self.blocksize\n M,N = self.shape\n\n bsr_sort_indices(M//R, N//C, R, C, self.indptr, self.indices, self.data.ravel())\n\n self.has_sorted_indices = True\n\n def prune(self):\n \"\"\" Remove empty space after all non-zero elements.\n \"\"\"\n\n R,C = self.blocksize\n M,N = self.shape\n\n if len(self.indptr) != M//R + 1:\n raise ValueError(\"index pointer has invalid length\")\n\n bnnz = self.indptr[-1]\n\n if len(self.indices) < bnnz:\n raise ValueError(\"indices array has too few elements\")\n if len(self.data) < bnnz:\n raise ValueError(\"data array has too few elements\")\n\n self.data = self.data[:bnnz]\n self.indices = self.indices[:bnnz]\n\n # utility functions\n def _binopt(self, other, op, in_shape=None, out_shape=None):\n \"\"\"Apply the binary operation fn to two sparse matrices.\"\"\"\n\n # Ideally we'd take the GCDs of the blocksize dimensions\n # and explode self and other to match.\n other = self.__class__(other, blocksize=self.blocksize)\n\n # e.g. 
bsr_plus_bsr, etc.\n fn = getattr(_sparsetools, self.format + op + self.format)\n\n R,C = self.blocksize\n\n max_bnnz = len(self.data) + len(other.data)\n idx_dtype = get_index_dtype((self.indptr, self.indices,\n other.indptr, other.indices),\n maxval=max_bnnz)\n indptr = np.empty(self.indptr.shape, dtype=idx_dtype)\n indices = np.empty(max_bnnz, dtype=idx_dtype)\n\n bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']\n if op in bool_ops:\n data = np.empty(R*C*max_bnnz, dtype=np.bool_)\n else:\n data = np.empty(R*C*max_bnnz, dtype=upcast(self.dtype,other.dtype))\n\n fn(self.shape[0]//R, self.shape[1]//C, R, C,\n self.indptr.astype(idx_dtype),\n self.indices.astype(idx_dtype),\n self.data,\n other.indptr.astype(idx_dtype),\n other.indices.astype(idx_dtype),\n np.ravel(other.data),\n indptr,\n indices,\n data)\n\n actual_bnnz = indptr[-1]\n indices = indices[:actual_bnnz]\n data = data[:R*C*actual_bnnz]\n\n if actual_bnnz < max_bnnz/2:\n indices = indices.copy()\n data = data.copy()\n\n data = data.reshape(-1,R,C)\n\n return self.__class__((data, indices, indptr), shape=self.shape)\n\n # needed by _data_matrix\n def _with_data(self,data,copy=True):\n \"\"\"Returns a matrix with the same sparsity structure as self,\n but with different data. By default the structure arrays\n (i.e. .indptr and .indices) are copied.\n \"\"\"\n if copy:\n return self.__class__((data,self.indices.copy(),self.indptr.copy()),\n shape=self.shape,dtype=data.dtype)\n else:\n return self.__class__((data,self.indices,self.indptr),\n shape=self.shape,dtype=data.dtype)\n\n# # these functions are used by the parent class\n# # to remove redudancy between bsc_matrix and bsr_matrix\n# def _swap(self,x):\n# \"\"\"swap the members of x if this is a column-oriented matrix\n# \"\"\"\n# return (x[0],x[1])\n\n\ndef isspmatrix_bsr(x):\n \"\"\"Is x of a bsr_matrix type?\n\n Parameters\n ----------\n x\n object to check for being a bsr matrix\n\n Returns\n -------\n bool\n True if x is a bsr matrix, False otherwise\n\n Examples\n --------\n >>> from scipy.sparse import bsr_matrix, isspmatrix_bsr\n >>> isspmatrix_bsr(bsr_matrix([[5]]))\n True\n\n >>> from scipy.sparse import bsr_matrix, csr_matrix, isspmatrix_bsr\n >>> isspmatrix_bsr(csr_matrix([[5]]))\n False\n \"\"\"\n return isinstance(x, bsr_matrix)\n" ]
[ [ "numpy.empty", "numpy.zeros", "numpy.diff", "numpy.dtype", "numpy.any", "numpy.asarray", "numpy.ravel", "numpy.arange", "numpy.deprecate", "numpy.array" ] ]
dimitri-justeau/rasterio
[ "dda4b823473ba3cb27038e00ee7aa82f867f2a55" ]
[ "tests/test_blocks.py" ]
[ "from functools import partial\nimport logging\nimport os.path\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\nimport unittest\n\nimport numpy as np\nimport pytest\n\nimport rasterio\nfrom rasterio import windows\n\n\nlogging.basicConfig(stream=sys.stderr, level=logging.DEBUG)\n\nclass WindowTest(unittest.TestCase):\n def test_window_shape_errors(self):\n # Positive height and width are needed when stop is None.\n self.assertRaises(\n ValueError,\n rasterio.window_shape,\n (((10, 20),(10, None)),) )\n self.assertRaises(\n ValueError,\n rasterio.window_shape,\n (((None, 10),(10, 20)),) )\n def test_window_shape_None_start(self):\n self.assertEqual(\n rasterio.window_shape(((None,4),(None,102))),\n (4, 102))\n def test_window_shape_None_stop(self):\n self.assertEqual(\n rasterio.window_shape(((10, None),(10, None)), 100, 90),\n (90, 80))\n def test_window_shape_positive(self):\n self.assertEqual(\n rasterio.window_shape(((0,4),(1,102))),\n (4, 101))\n def test_window_shape_negative(self):\n self.assertEqual(\n rasterio.window_shape(((-10, None),(-10, None)), 100, 90),\n (10, 10))\n self.assertEqual(\n rasterio.window_shape(((~0, None),(~0, None)), 100, 90),\n (1, 1))\n self.assertEqual(\n rasterio.window_shape(((None, ~0),(None, ~0)), 100, 90),\n (99, 89))\n def test_eval(self):\n self.assertEqual(\n rasterio.eval_window(((-10, None), (-10, None)), 100, 90),\n ((90, 100), (80, 90)))\n self.assertEqual(\n rasterio.eval_window(((None, -10), (None, -10)), 100, 90),\n ((0, 90), (0, 80)))\n\ndef test_window_index():\n idx = rasterio.window_index(((0,4),(1,12)))\n assert len(idx) == 2\n r, c = idx\n assert r.start == 0\n assert r.stop == 4\n assert c.start == 1\n assert c.stop == 12\n arr = np.ones((20,20))\n assert arr[idx].shape == (4, 11)\n\nclass RasterBlocksTest(unittest.TestCase):\n def test_blocks(self):\n with rasterio.open('tests/data/RGB.byte.tif') as s:\n self.assertEqual(len(s.block_shapes), 3)\n self.assertEqual(s.block_shapes, ((3, 791), (3, 791), (3, 791)))\n windows = s.block_windows(1)\n (j,i), first = next(windows)\n self.assertEqual((j,i), (0, 0))\n self.assertEqual(first, ((0, 3), (0, 791)))\n windows = s.block_windows()\n (j,i), first = next(windows)\n self.assertEqual((j,i), (0, 0))\n self.assertEqual(first, ((0, 3), (0, 791)))\n (j, i), second = next(windows)\n self.assertEqual((j,i), (1, 0))\n self.assertEqual(second, ((3, 6), (0, 791)))\n (j, i), last = list(windows)[~0]\n self.assertEqual((j,i), (239, 0))\n self.assertEqual(last, ((717, 718), (0, 791)))\n def test_block_coverage(self):\n with rasterio.open('tests/data/RGB.byte.tif') as s:\n self.assertEqual(\n s.width*s.height,\n sum((w[0][1]-w[0][0])*(w[1][1]-w[1][0])\n for ji, w in s.block_windows(1)))\n\nclass WindowReadTest(unittest.TestCase):\n def test_read_window(self):\n with rasterio.open('tests/data/RGB.byte.tif') as s:\n windows = s.block_windows(1)\n ji, first_window = next(windows)\n first_block = s.read(1, window=first_window)\n self.assertEqual(first_block.dtype, rasterio.ubyte)\n self.assertEqual(\n first_block.shape,\n rasterio.window_shape(first_window))\n\nclass WindowWriteTest(unittest.TestCase):\n def setUp(self):\n self.tempdir = tempfile.mkdtemp()\n def tearDown(self):\n shutil.rmtree(self.tempdir)\n def test_write_window(self):\n name = os.path.join(self.tempdir, \"test_write_window.tif\")\n a = np.ones((50, 50), dtype=rasterio.ubyte) * 127\n with rasterio.open(\n name, 'w',\n driver='GTiff', width=100, height=100, count=1,\n dtype=a.dtype) as s:\n s.write(a, indexes=1, 
window=((30, 80), (10, 60)))\n # subprocess.call([\"open\", name])\n info = subprocess.check_output([\"gdalinfo\", \"-stats\", name])\n self.assert_(\n \"Minimum=0.000, Maximum=127.000, \"\n \"Mean=31.750, StdDev=54.993\" in info.decode('utf-8'),\n info)\n\n\ndef test_block_windows_unfiltered(path_rgb_byte_tif):\n \"\"\"Get all block windows\"\"\"\n with rasterio.open(path_rgb_byte_tif) as src:\n assert len(list(src.block_windows())) == 240\n\n\ndef test_block_windows_filtered_all(path_rgb_byte_tif):\n \"\"\"Get all block windows using filter\"\"\"\n with rasterio.open(path_rgb_byte_tif) as src:\n w, s, e, n = src.bounds\n focus_window = src.window(w, s, e, n)\n filter_func = partial(windows.intersect, focus_window)\n itr = ((ij, win) for ij, win in src.block_windows() if filter_func(win))\n assert len(list(itr)) == 240\n\n\ndef test_block_windows_filtered_one(path_rgb_byte_tif):\n \"\"\"Get the first block windows using filter\"\"\"\n with rasterio.open(path_rgb_byte_tif) as src:\n w, s, e, n = src.bounds\n focus_window = src.window(w, n - 1.0, w + 1.0, n)\n filter_func = partial(windows.intersect, focus_window)\n itr = ((ij, win) for ij, win in src.block_windows() if filter_func(win))\n assert next(itr) == ((0, 0), ((0, 3), (0, 791)))\n with pytest.raises(StopIteration):\n next(itr)\n\n\ndef test_block_windows_filtered_none(path_rgb_byte_tif):\n \"\"\"Get no block windows using filter\"\"\"\n with rasterio.open(path_rgb_byte_tif) as src:\n w, s, e, n = src.bounds\n focus_window = src.window(w - 100.0, n + 100.0, w - 1.0, n + 1.0)\n filter_func = partial(windows.intersect, focus_window)\n itr = ((ij, win) for ij, win in src.block_windows() if filter_func(win))\n with pytest.raises(StopIteration):\n next(itr)\n" ]
[ [ "numpy.ones" ] ]
Redhorde/biolabYoloTEST
[ "e74cdcffed9c2105f882bbd541b3746ce1b638be" ]
[ "YoloFunctionality.py" ]
[ "import cv2\r\nimport numpy as np\r\nimport os\r\n\r\n\r\ndef yolo_cut(image):\r\n net = cv2.dnn.readNet(\"hand-obj_final.weights\", \"hand-obj.cfg\")\r\n classes = []\r\n with open(\"obj.names\", \"r\") as f:\r\n classes = [line.strip() for line in f.readlines()]\r\n\r\n layer_names = net.getLayerNames()\r\n outputlayers = [layer_names[i[0]-1]for i in net.getUnconnectedOutLayers()]\r\n\r\n # img = cv2.imread(\"hand.JPG\")\r\n img = image\r\n height, width, channels = img.shape\r\n # cv2.imshow(\"Hand\", img)\r\n # cv2.waitKey(0)\r\n # cv2.destroyAllWindows()\r\n blob = cv2.dnn.blobFromImage(img, 0.00392, (416, 416), (0, 0, 0), True, crop=False)\r\n net.setInput(blob)\r\n outputs = net.forward(outputlayers)\r\n # print(outputs)\r\n\r\n class_ids = []\r\n confidences = []\r\n boxes = []\r\n for out in outputs:\r\n for detection in out:\r\n scores = detection[5:]\r\n class_id = np.argmax(scores)\r\n confidence = scores[class_id]\r\n if confidence > 0:\r\n center_x = int(detection[0]*width)\r\n center_y = int(detection[1]*height)\r\n w = int(detection[2]*width)\r\n h = int(detection[3]*height)\r\n\r\n x = int(center_x - w/2)\r\n y = int(center_y - h/2)\r\n # cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 1)\r\n boxes.append([x, y, w, h])\r\n confidences.append(float(confidence))\r\n class_ids.append(class_id)\r\n indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.4, 0.6)\r\n font = cv2.FONT_HERSHEY_PLAIN\r\n # print(\"Hand bounding box:\")\r\n # print(boxes)\r\n try:\r\n x = boxes[0][0]\r\n y = boxes[0][1]\r\n w = boxes[0][2]\r\n h = boxes[0][3]\r\n # p rint(str(x)+\" \"+str(y)+\" \"+str(w)+\" \"+str(h))\r\n expand = 20 # expand mask by number of pixels\r\n img_crop = img[y-expand:y+h+expand, x-expand:x+w+expand]\r\n # cv2.imshow(\"Hand_cropped\", img_crop)\r\n # cv2.waitKey(0)\r\n # cv2.destroyAllWindows()\r\n # cv2.imwrite(\"hand_crop.JPG\", img_crop)\r\n except:\r\n print(\"No hand found\")\r\n img_crop = img\r\n # cv2.imwrite(\"hand_crop.JPG\", img_crop)\r\n return img_crop\r\n\r\n\r\ndef vgg_detect():\r\n pass\r\n\r\n\r\ndef main():\r\n for folder in os.scandir(\"Dataset\"):\r\n for file in os.listdir(folder.path):\r\n pass\r\n\r\n\r\nif __name__ == '__main__':\r\n main()" ]
[ [ "numpy.argmax" ] ]
putcn/moviepy
[ "48ae70c5f46dab61bafe8f462faa19d844ad60d3" ]
[ "moviepy/video/compositing/concatenate.py" ]
[ "import numpy as np\n\nfrom moviepy.audio.AudioClip import CompositeAudioClip\nfrom moviepy.tools import deprecated_version_of\nfrom moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip\nfrom moviepy.video.compositing.on_color import on_color\nfrom moviepy.video.VideoClip import ColorClip, VideoClip\n\ntry: # Python 2\n reduce\nexcept NameError: # Python 3\n from functools import reduce\n\n\n\ndef concatenate_videoclips(clips, method=\"chain\", transition=None,\n bg_color=None, ismask=False, padding = 0):\n \"\"\" Concatenates several video clips\n\n Returns a video clip made by clip by concatenating several video clips.\n (Concatenated means that they will be played one after another).\n\n There are two methods:\n\n - method=\"chain\": will produce a clip that simply outputs\n the frames of the succesive clips, without any correction if they are\n not of the same size of anything. If none of the clips have masks the\n resulting clip has no mask, else the mask is a concatenation of masks\n (using completely opaque for clips that don't have masks, obviously).\n If you have clips of different size and you want to write directly the\n result of the concatenation to a file, use the method \"compose\" instead.\n\n - method=\"compose\", if the clips do not have the same\n resolution, the final resolution will be such that no clip has\n to be resized.\n As a consequence the final clip has the height of the highest\n clip and the width of the widest clip of the list. All the\n clips with smaller dimensions will appear centered. The border\n will be transparent if mask=True, else it will be of the\n color specified by ``bg_color``.\n\n The clip with the highest FPS will be the FPS of the result clip.\n\n Parameters\n -----------\n clips\n A list of video clips which must all have their ``duration``\n attributes set.\n method\n \"chain\" or \"compose\": see above.\n transition\n A clip that will be played between each two clips of the list.\n\n bg_color\n Only for method='compose'. Color of the background.\n Set to None for a transparent clip\n\n padding\n Only for method='compose'. Duration during two consecutive clips.\n Note that for negative padding, a clip will partly play at the same\n time as the clip it follows (negative padding is cool for clips who fade\n in on one another). 
A non-null padding automatically sets the method to\n `compose`.\n\n \"\"\"\n\n if transition is not None:\n l = [[v, transition] for v in clips[:-1]]\n clips = reduce(lambda x, y: x + y, l) + [clips[-1]]\n transition = None\n\n tt = np.cumsum([0] + [c.duration for c in clips])\n\n sizes = [v.size for v in clips]\n\n w = max([r[0] for r in sizes])\n h = max([r[1] for r in sizes])\n\n tt = np.maximum(0, tt + padding*np.arange(len(tt)))\n\n if method == \"chain\":\n def make_frame(t):\n i = max([i for i, e in enumerate(tt) if e <= t])\n return clips[i].get_frame(t - tt[i])\n\n def get_mask(c):\n mask = c.mask or ColorClip([1, 1], color=1, ismask=True)\n if mask.duration is None:\n mask.duration = c.duration\n return mask\n\n result = VideoClip(ismask = ismask, make_frame = make_frame)\n if any([c.mask is not None for c in clips]):\n masks = [get_mask(c) for c in clips]\n result.mask = concatenate_videoclips(masks, method=\"chain\",\n ismask=True)\n result.clips = clips\n elif method == \"compose\":\n result = CompositeVideoClip( [c.set_start(t).set_position('center')\n for (c, t) in zip(clips, tt)],\n size = (w, h), bg_color=bg_color, ismask=ismask)\n else:\n raise Exception(\"Moviepy Error: The 'method' argument of \"\n \"concatenate_videoclips must be 'chain' or 'compose'\")\n\n result.tt = tt\n\n result.start_times = tt[:-1]\n result.start, result.duration, result.end = 0, tt[-1] , tt[-1]\n\n audio_t = [(c.audio,t) for c,t in zip(clips,tt) if c.audio is not None]\n if len(audio_t)>0:\n result.audio = CompositeAudioClip([a.set_start(t)\n for a,t in audio_t])\n\n fpss = [c.fps for c in clips if hasattr(c,'fps') and c.fps is not None]\n if len(fpss) == 0:\n result.fps = None\n else:\n result.fps = max(fpss)\n\n return result\n\n\nconcatenate = deprecated_version_of(concatenate_videoclips,\n oldname=\"concatenate\")\n" ]
[ [ "numpy.cumsum" ] ]
raphaelavalos/multiagent-particle-envs
[ "d589429084031a58352d214b6147339a21f41cd5" ]
[ "multiagent/rendering.py" ]
[ "\"\"\"\n2D rendering framework\n\"\"\"\nfrom __future__ import division\nimport os\nimport six\nimport sys\n\nif \"Apple\" in sys.version:\n if 'DYLD_FALLBACK_LIBRARY_PATH' in os.environ:\n os.environ['DYLD_FALLBACK_LIBRARY_PATH'] += ':/usr/lib'\n # (JDS 2016/04/15): avoid bug on Anaconda 2.3.0 / Yosemite\n\n\nfrom gym import error\n\ntry:\n import pyglet\nexcept ImportError as e:\n raise ImportError(\"HINT: you can install pyglet directly via 'pip install pyglet'. But if you really just want to install all Gym dependencies and not have to think about it, 'pip install -e .[all]' or 'pip install gym[all]' will do it.\")\n\ntry:\n from pyglet.gl import *\nexcept ImportError as e:\n raise ImportError(\"Error occured while running `from pyglet.gl import *`\",suffix=\"HINT: make sure you have OpenGL install. On Ubuntu, you can run 'apt-get install python-opengl'. If you're running on a server, you may need a virtual frame buffer; something like this should work: 'xvfb-run -s \\\"-screen 0 1400x900x24\\\" python <your_script.py>'\")\n\nimport math\nimport numpy as np\n\nRAD2DEG = 57.29577951308232\n\ndef get_display(spec):\n \"\"\"Convert a display specification (such as :0) into an actual Display\n object.\n Pyglet only supports multiple Displays on Linux.\n \"\"\"\n if spec is None:\n return pyglet.canvas.get_display()\n # returns already available pyglet_display,\n # if there is no pyglet display available then it creates one\n elif isinstance(spec, str):\n return pyglet.canvas.Display(spec)\n else:\n raise error.Error('Invalid display specification: {}. (Must be a string like :0 or None.)'.format(spec))\n\nclass Viewer(object):\n def __init__(self, width, height, display=None):\n display = get_display(display)\n\n self.width = width\n self.height = height\n\n self.window = pyglet.window.Window(width=width, height=height, display=display)\n self.window.on_close = self.window_closed_by_user\n self.geoms = []\n self.onetime_geoms = []\n self.transform = Transform()\n\n glEnable(GL_BLEND)\n # glEnable(GL_MULTISAMPLE)\n glEnable(GL_LINE_SMOOTH)\n # glHint(GL_LINE_SMOOTH_HINT, GL_DONT_CARE)\n glHint(GL_LINE_SMOOTH_HINT, GL_NICEST)\n glLineWidth(2.0)\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n\n def close(self):\n self.window.close()\n\n def window_closed_by_user(self):\n self.close()\n\n def set_bounds(self, left, right, bottom, top):\n assert right > left and top > bottom\n scalex = self.width/(right-left)\n scaley = self.height/(top-bottom)\n self.transform = Transform(\n translation=(-left*scalex, -bottom*scaley),\n scale=(scalex, scaley))\n\n def add_geom(self, geom):\n self.geoms.append(geom)\n\n def add_onetime(self, geom):\n self.onetime_geoms.append(geom)\n\n def render(self, return_rgb_array=False):\n glClearColor(1,1,1,1)\n self.window.clear()\n self.window.switch_to()\n self.window.dispatch_events()\n self.transform.enable()\n for geom in self.geoms:\n geom.render()\n for geom in self.onetime_geoms:\n geom.render()\n self.transform.disable()\n arr = None\n if return_rgb_array:\n buffer = pyglet.image.get_buffer_manager().get_color_buffer()\n image_data = buffer.get_image_data()\n arr = np.fromstring(image_data.get_data(), dtype=np.uint8, sep='')\n # In https://github.com/openai/gym-http-api/issues/2, we\n # discovered that someone using Xmonad on Arch was having\n # a window of size 598 x 398, though a 600 x 400 window\n # was requested. (Guess Xmonad was preserving a pixel for\n # the boundary.) 
So we use the buffer height/width rather\n # than the requested one.\n arr = arr.reshape(buffer.height, buffer.width, 4)\n arr = arr[::-1,:,0:3]\n self.window.flip()\n self.onetime_geoms = []\n return arr\n\n # Convenience\n def draw_circle(self, radius=10, res=30, filled=True, **attrs):\n geom = make_circle(radius=radius, res=res, filled=filled)\n _add_attrs(geom, attrs)\n self.add_onetime(geom)\n return geom\n\n def draw_polygon(self, v, filled=True, **attrs):\n geom = make_polygon(v=v, filled=filled)\n _add_attrs(geom, attrs)\n self.add_onetime(geom)\n return geom\n\n def draw_polyline(self, v, **attrs):\n geom = make_polyline(v=v)\n _add_attrs(geom, attrs)\n self.add_onetime(geom)\n return geom\n\n def draw_line(self, start, end, **attrs):\n geom = Line(start, end)\n _add_attrs(geom, attrs)\n self.add_onetime(geom)\n return geom\n\n def get_array(self):\n self.window.flip()\n image_data = pyglet.image.get_buffer_manager().get_color_buffer().get_image_data()\n self.window.flip()\n arr = np.fromstring(image_data.data, dtype=np.uint8, sep='')\n arr = arr.reshape(self.height, self.width, 4)\n return arr[::-1,:,0:3]\n\ndef _add_attrs(geom, attrs):\n if \"color\" in attrs:\n geom.set_color(*attrs[\"color\"])\n if \"linewidth\" in attrs:\n geom.set_linewidth(attrs[\"linewidth\"])\n\nclass Geom(object):\n def __init__(self):\n self._color=Color((0, 0, 0, 1.0))\n self.attrs = [self._color]\n def render(self):\n for attr in reversed(self.attrs):\n attr.enable()\n self.render1()\n for attr in self.attrs:\n attr.disable()\n def render1(self):\n raise NotImplementedError\n def add_attr(self, attr):\n self.attrs.append(attr)\n def set_color(self, r, g, b, alpha=1):\n self._color.vec4 = (r, g, b, alpha)\n\nclass Attr(object):\n def enable(self):\n raise NotImplementedError\n def disable(self):\n pass\n\nclass Transform(Attr):\n def __init__(self, translation=(0.0, 0.0), rotation=0.0, scale=(1,1)):\n self.set_translation(*translation)\n self.set_rotation(rotation)\n self.set_scale(*scale)\n def enable(self):\n glPushMatrix()\n glTranslatef(self.translation[0], self.translation[1], 0) # translate to GL loc ppint\n glRotatef(RAD2DEG * self.rotation, 0, 0, 1.0)\n glScalef(self.scale[0], self.scale[1], 1)\n def disable(self):\n glPopMatrix()\n def set_translation(self, newx, newy):\n self.translation = (float(newx), float(newy))\n def set_rotation(self, new):\n self.rotation = float(new)\n def set_scale(self, newx, newy):\n self.scale = (float(newx), float(newy))\n\nclass Color(Attr):\n def __init__(self, vec4):\n self.vec4 = vec4\n def enable(self):\n glColor4f(*self.vec4)\n\nclass LineStyle(Attr):\n def __init__(self, style):\n self.style = style\n def enable(self):\n glEnable(GL_LINE_STIPPLE)\n glLineStipple(1, self.style)\n def disable(self):\n glDisable(GL_LINE_STIPPLE)\n\nclass LineWidth(Attr):\n def __init__(self, stroke):\n self.stroke = stroke\n def enable(self):\n glLineWidth(self.stroke)\n\nclass Point(Geom):\n def __init__(self):\n Geom.__init__(self)\n def render1(self):\n glBegin(GL_POINTS) # draw point\n glVertex3f(0.0, 0.0, 0.0)\n glEnd()\n\nclass FilledPolygon(Geom):\n def __init__(self, v):\n Geom.__init__(self)\n self.v = v\n def render1(self):\n if len(self.v) == 4 : glBegin(GL_QUADS)\n elif len(self.v) > 4 : glBegin(GL_POLYGON)\n else: glBegin(GL_TRIANGLES)\n for p in self.v:\n glVertex3f(p[0], p[1],0) # draw each vertex\n glEnd()\n\n color = (self._color.vec4[0] * 0.5, self._color.vec4[1] * 0.5, self._color.vec4[2] * 0.5, self._color.vec4[3] * 0.5)\n glColor4f(*color)\n 
glBegin(GL_LINE_LOOP)\n for p in self.v:\n glVertex3f(p[0], p[1],0) # draw each vertex\n glEnd()\n\ndef make_circle(radius=10, res=30, filled=True):\n points = []\n for i in range(res):\n ang = 2*math.pi*i / res\n points.append((math.cos(ang)*radius, math.sin(ang)*radius))\n if filled:\n return FilledPolygon(points)\n else:\n return PolyLine(points, True)\n\ndef make_polygon(v, filled=True):\n if filled: return FilledPolygon(v)\n else: return PolyLine(v, True)\n\ndef make_polyline(v):\n return PolyLine(v, False)\n\ndef make_capsule(length, width):\n l, r, t, b = 0, length, width/2, -width/2\n box = make_polygon([(l,b), (l,t), (r,t), (r,b)])\n circ0 = make_circle(width/2)\n circ1 = make_circle(width/2)\n circ1.add_attr(Transform(translation=(length, 0)))\n geom = Compound([box, circ0, circ1])\n return geom\n\nclass Compound(Geom):\n def __init__(self, gs):\n Geom.__init__(self)\n self.gs = gs\n for g in self.gs:\n g.attrs = [a for a in g.attrs if not isinstance(a, Color)]\n def render1(self):\n for g in self.gs:\n g.render()\n\nclass PolyLine(Geom):\n def __init__(self, v, close):\n Geom.__init__(self)\n self.v = v\n self.close = close\n self.linewidth = LineWidth(1)\n self.add_attr(self.linewidth)\n def render1(self):\n glBegin(GL_LINE_LOOP if self.close else GL_LINE_STRIP)\n for p in self.v:\n glVertex3f(p[0], p[1],0) # draw each vertex\n glEnd()\n def set_linewidth(self, x):\n self.linewidth.stroke = x\n\nclass Line(Geom):\n def __init__(self, start=(0.0, 0.0), end=(0.0, 0.0)):\n Geom.__init__(self)\n self.start = start\n self.end = end\n self.linewidth = LineWidth(1)\n self.add_attr(self.linewidth)\n\n def render1(self):\n glBegin(GL_LINES)\n glVertex2f(*self.start)\n glVertex2f(*self.end)\n glEnd()\n\nclass Image(Geom):\n def __init__(self, fname, width, height):\n Geom.__init__(self)\n self.width = width\n self.height = height\n img = pyglet.image.load(fname)\n self.img = img\n self.flip = False\n def render1(self):\n self.img.blit(-self.width/2, -self.height/2, width=self.width, height=self.height)\n\n# ================================================================\n\nclass SimpleImageViewer(object):\n def __init__(self, display=None):\n self.window = None\n self.isopen = False\n self.display = display\n def imshow(self, arr):\n if self.window is None:\n height, width, channels = arr.shape\n self.window = pyglet.window.Window(width=width, height=height, display=self.display)\n self.width = width\n self.height = height\n self.isopen = True\n assert arr.shape == (self.height, self.width, 3), \"You passed in an image with the wrong number shape\"\n image = pyglet.image.ImageData(self.width, self.height, 'RGB', arr.tobytes(), pitch=self.width * -3)\n self.window.clear()\n self.window.switch_to()\n self.window.dispatch_events()\n image.blit(0,0)\n self.window.flip()\n def close(self):\n if self.isopen:\n self.window.close()\n self.isopen = False\n def __del__(self):\n self.close()" ]
[ [ "numpy.fromstring" ] ]
bsm8734/formula-image-latex-recognition
[ "86d5070e8f907571a47967d64facaee246d92a35" ]
[ "checkpoint.py" ]
[ "import os\nimport torch\nfrom tensorboardX import SummaryWriter\n\nuse_cuda = torch.cuda.is_available()\n\ndefault_checkpoint = {\n \"epoch\": 0,\n \"train_losses\": [],\n \"train_symbol_accuracy\": [],\n \"train_sentence_accuracy\": [],\n \"train_wer\": [],\n \"train_score\": [],\n \"validation_losses\": [],\n \"validation_symbol_accuracy\": [],\n \"validation_sentence_accuracy\": [],\n \"validation_wer\": [],\n \"validation_score\": [],\n \"lr\": [],\n \"grad_norm\": [],\n \"model\": {},\n \"configs\":{},\n \"token_to_id\":{},\n \"id_to_token\":{},\n}\n\n\ndef save_checkpoint(checkpoint, dir=\"./checkpoints\", prefix=\"\"):\n \"\"\" Saving check point\n\n Args:\n checkpoint(dict) : Checkpoint to save\n dir(str) : Path to save the checkpoint\n prefix(str) : Path of location of dir \n \"\"\"\n # Padded to 4 digits because of lexical sorting of numbers.\n # e.g. 0009.pth\n filename = \"{num:0>4}.pth\".format(num=checkpoint[\"epoch\"])\n if not os.path.exists(os.path.join(prefix, dir)):\n os.makedirs(os.path.join(prefix, dir))\n torch.save(checkpoint, os.path.join(prefix, dir, filename))\n\n\ndef load_checkpoint(path, cuda=use_cuda):\n \"\"\" Load check point\n\n Args:\n path(str) : Path checkpoint located\n cuda : Whether use cuda or not [Default: use_cuda]\n Returns\n Loaded checkpoints\n \"\"\"\n if cuda:\n return torch.load(path)\n else:\n # Load GPU model on CPU\n return torch.load(path, map_location=lambda storage, loc: storage)\n\n\ndef init_tensorboard(name=\"\", base_dir=\"./tensorboard\"):\n \"\"\"Init tensorboard\n Args:\n name(str) : name of tensorboard\n base_dir(str): path of tesnorboard\n \"\"\"\n return SummaryWriter(os.path.join(name, base_dir))\n\n\ndef write_tensorboard(\n writer,\n epoch,\n grad_norm,\n train_loss,\n train_symbol_accuracy,\n train_sentence_accuracy,\n train_wer,\n train_score,\n validation_loss,\n validation_symbol_accuracy,\n validation_sentence_accuracy,\n validation_wer,\n validation_score,\n model,\n):\n writer.add_scalar(\"train_loss\", train_loss, epoch)\n writer.add_scalar(\"train_symbol_accuracy\", train_symbol_accuracy, epoch)\n writer.add_scalar(\"train_sentence_accuracy\",train_sentence_accuracy,epoch)\n writer.add_scalar(\"train_wer\", train_wer, epoch)\n writer.add_scalar(\"train_score\", train_score, epoch)\n writer.add_scalar(\"validation_loss\", validation_loss, epoch)\n writer.add_scalar(\"validation_symbol_accuracy\", validation_symbol_accuracy, epoch)\n writer.add_scalar(\"validation_sentence_accuracy\",validation_sentence_accuracy,epoch)\n writer.add_scalar(\"validation_wer\",validation_wer,epoch)\n writer.add_scalar(\"validation_score\", validation_score, epoch)\n writer.add_scalar(\"grad_norm\", grad_norm, epoch)\n\n for name, param in model.encoder.named_parameters():\n writer.add_histogram(\n \"encoder/{}\".format(name), param.detach().cpu().numpy(), epoch\n )\n if param.grad is not None:\n writer.add_histogram(\n \"encoder/{}/grad\".format(name), param.grad.detach().cpu().numpy(), epoch\n )\n\n for name, param in model.decoder.named_parameters():\n writer.add_histogram(\n \"decoder/{}\".format(name), param.detach().cpu().numpy(), epoch\n )\n if param.grad is not None:\n writer.add_histogram(\n \"decoder/{}/grad\".format(name), param.grad.detach().cpu().numpy(), epoch\n )\n" ]
[ [ "torch.cuda.is_available", "torch.load" ] ]
webclinic017/fastbt
[ "715982cc454ee6fabcaa605188fd1aad7a32a376" ]
[ "tests/test_datasource.py" ]
[ "import unittest\nimport pandas as pd\nimport numpy as np\nimport context\n\nfrom fastbt.datasource import DataSource\nimport talib\n\nclass TestDataSource(unittest.TestCase):\n\n def setUp(self): \n df = pd.read_csv('tests/data/sample.csv', parse_dates=['timestamp'])\n self.ds = DataSource(data=df)\n\n def test_data(self):\n self.assertEqual(self.ds.data.iloc[20,1], 'five')\n self.assertEqual(self.ds.data.iloc[14,3], 112)\n self.assertEqual(self.ds.data.iloc[24,7], 10.54)\n\n def test_data_without_sort(self):\n df = pd.read_csv('tests/data/sample.csv', parse_dates=['timestamp'])\n self.ds = DataSource(data=df, sort=False)\n self.assertEqual(self.ds.data.iloc[9,4], 999)\n self.assertEqual(self.ds.data.iloc[24,6], 41688)\n self.assertEqual(self.ds.data.at[4, 'close'], 10.6)\n\n def test_initialize_case(self):\n df = pd.read_csv('tests/data/sample.csv', parse_dates=['timestamp'])\n df.columns = [x.upper() for x in df.columns]\n self.assertEqual(df.columns[0], 'TIMESTAMP')\n self.ds = DataSource(data=df)\n self.assertEqual(self.ds.data.columns[0], 'timestamp')\n\n def test_initialize_column_rename(self):\n df = pd.read_csv('tests/data/sample.csv', parse_dates=['timestamp'])\n df.columns = ['TS', 'TRADINGSYMBOL', 'OPEN', 'HIGH', 'LOW',\n 'CLOSE', 'VOLUME', 'PREVCLOSE']\n self.ds = DataSource(data=df, timestamp='TS', symbol='TRADINGSYMBOL')\n self.assertEqual(self.ds.data.columns[0], 'timestamp')\n self.assertEqual(self.ds.data.columns[1], 'symbol')\n\n def test_add_lag(self):\n length = len(self.ds.data)\n idx = pd.IndexSlice\n self.ds.add_lag(on='close')\n self.ds.add_lag(on='volume', period=2)\n d = self.ds.data.set_index(['timestamp', 'symbol'])\n self.assertEqual(d.at[idx['2018-01-04', 'one'], 'lag_close_1'], 11)\n self.assertEqual(d.at[idx['2018-01-06', 'six'], 'lag_volume_2'], 86014)\n self.assertEqual(len(self.ds.data.columns), 10)\n self.assertEqual(len(self.ds.data), length)\n\n def test_add_lag_column_rename(self):\n idx = pd.IndexSlice\n self.ds.add_lag(on='close')\n self.ds.add_lag(on='close', col_name='some_col')\n d = self.ds.data.set_index(['timestamp', 'symbol'])\n self.assertEqual(d.at[idx['2018-01-04', 'one'], 'lag_close_1'], 11)\n self.assertEqual(d.at[idx['2018-01-04', 'one'], 'some_col'], 11)\n self.assertEqual(d.at[idx['2018-01-05', 'three'], 'some_col'], 109)\n\n def test_add_pct_change(self):\n idx = pd.IndexSlice\n self.ds.add_pct_change(on='close')\n self.ds.add_pct_change(on='close', period=2)\n self.ds.add_pct_change(on='close', period=2, col_name='new_col')\n d = self.ds.data.set_index(['timestamp', 'symbol'])\n R = lambda x: round(x,2)\n self.assertEqual(R(d.at[idx['2018-01-05', 'three'], 'chg_close_1']), -0.07)\n self.assertEqual(R(d.at[idx['2018-01-06', 'five'], 'chg_close_1']), 0.17)\n self.assertEqual(R(d.at[idx['2018-01-05', 'four'], 'chg_close_2']), 0.05)\n self.assertEqual(R(d.at[idx['2018-01-05', 'four'], 'new_col']), 0.05)\n self.assertEqual(R(d.at[idx['2018-01-03', 'six'], 'new_col']), -0.1)\n self.assertEqual(pd.isna(d.at[idx['2018-01-02', 'one'], 'new_col']), True)\n self.assertEqual(len(self.ds.data.columns), 11)\n\n def test_add_pct_change_lag(self):\n idx = pd.IndexSlice\n self.ds.add_pct_change(on='close', period=2, lag=1)\n self.ds.add_pct_change(on='close', period=1, lag=2)\n d = self.ds.data.set_index(['timestamp', 'symbol'])\n R = lambda x: round(x,2)\n self.assertEqual(R(d.at[idx['2018-01-04', 'four'], 'chg_close_2']), 0.09)\n self.assertEqual(R(d.at[idx['2018-01-04', 'four'], 'chg_close_1']), 0.01)\n 
self.assertEqual(R(d.at[idx['2018-01-06', 'three'], 'chg_close_1']), -0.01)\n\n\n def test_add_pct_change_lag_col_name(self):\n idx = pd.IndexSlice\n self.ds.add_pct_change(on='high', period=2, lag=1)\n self.ds.add_pct_change(on='close', period=1, lag=2, col_name='lagged_2')\n d = self.ds.data.set_index(['timestamp', 'symbol'])\n R = lambda x: round(x,2)\n self.assertEqual(R(d.at[idx['2018-01-05', 'six'], 'chg_high_2']), -0.04)\n self.assertEqual(R(d.at[idx['2018-01-04', 'four'], 'lagged_2']), 0.01)\n\n def test_formula_add_col_name(self):\n idx = pd.IndexSlice\n self.ds.add_formula('open+close', 'new_col')\n self.ds.add_formula('volume/close', 'new_col_2')\n d = self.ds.data.set_index(['timestamp', 'symbol'])\n R = lambda x: round(x,2)\n self.assertEqual(R(d.at[idx['2018-01-04', 'four'], 'new_col']), 336)\n self.assertEqual(R(d.at[idx['2018-01-06', 'one'], 'new_col_2']), 77755.77)\n\n def test_formula_case_insensitive(self):\n idx = pd.IndexSlice\n self.ds.add_formula('OPEN+CLOSE', 'new_col')\n self.ds.add_formula('volume/close', 'NEW_COL_2')\n d = self.ds.data.set_index(['timestamp', 'symbol'])\n R = lambda x: round(x,2)\n self.assertEqual(R(d.at[idx['2018-01-04', 'four'], 'new_col']), 336)\n self.assertEqual(R(d.at[idx['2018-01-06', 'one'], 'new_col_2']), 77755.77)\n\n\n def test_formula_calculated_column(self):\n idx = pd.IndexSlice\n self.ds.add_formula('(open+close)*100', 'new_col_1')\n self.ds.add_formula('volume/100', 'new_col_2')\n self.ds.add_formula('new_col_1+new_col_2', 'new_col_3')\n d = self.ds.data.set_index(['timestamp', 'symbol'])\n R = lambda x: round(x,2)\n self.assertEqual(R(d.at[idx['2018-01-06', 'one'], 'new_col_3']), 10190.6)\n self.assertEqual(R(d.at[idx['2018-01-05', 'two'], 'new_col_3']), 200389.97)\n\n def test_rolling_simple(self):\n from pandas import isna\n q = 'symbol == \"one\"'\n df = pd.read_csv('tests/data/sample.csv', parse_dates=['timestamp']).query(q)\n df['r2'] = df['close'].rolling(2).mean()\n self.ds.add_rolling(2, col_name='r2')\n df2 = self.ds.data.query(q)\n print('RESULT' , df['r2'], df2['r2'])\n for a,b in zip(df['r2'], df2['r2']):\n if not(isna(a)):\n assert a==b \n\n def test_rolling_values(self):\n idx = pd.IndexSlice\n self.ds.add_rolling(4, on='volume', function='max')\n d = self.ds.data.set_index(['timestamp', 'symbol'])\n R = lambda x: round(x,2)\n self.assertEqual(d.at[idx['2018-01-05', 'five'], 'rol_max_volume_4'], 971704)\n self.assertEqual(d.at[idx['2018-01-05', 'six'], 'rol_max_volume_4'], 195539)\n self.assertEqual(d.at[idx['2018-01-04', 'three'], 'rol_max_volume_4'], 433733)\n # Adding lag and testing\n self.ds.add_rolling(4, on='volume', function='max', lag=1)\n d = self.ds.data.set_index(['timestamp', 'symbol'])\n self.assertEqual(d.at[idx['2018-01-06', 'five'], 'rol_max_volume_4'], 971704)\n self.assertEqual(d.at[idx['2018-01-06', 'six'], 'rol_max_volume_4'], 195539)\n self.assertEqual(d.at[idx['2018-01-05', 'three'], 'rol_max_volume_4'], 433733)\n # Testing for 2 lags and column name\n self.ds.add_rolling(4, on='volume', function='max', lag=2, col_name='check')\n d = self.ds.data.set_index(['timestamp', 'symbol'])\n self.assertEqual(d.at[idx['2018-01-06', 'three'], 'check'], 433733) \n\n def test_batch(self):\n length = len(self.ds.data)\n batch = [\n {'P': {'on': 'close', 'period': 1, 'lag': 1}},\n {'L': {'on': 'volume', 'period': 1}},\n {'F': {'formula': '(open+close)/2', 'col_name': 'AvgPrice'}},\n {'I': {'indicator': 'SMA', 'period': 3, 'lag': 1, 'col_name': 'SMA3'}},\n {'F': {'formula': 'avgprice + sma3', 'col_name': 
'final'}},\n {'R': {'window': 3, 'function': 'mean'}}\n ]\n d = self.ds.batch_process(batch).set_index(['timestamp', 'symbol'])\n self.assertEqual(len(d.columns), 12)\n self.assertEqual(len(self.ds.data.columns), 14)\n self.assertEqual(len(self.ds.data), length)\n\n def test_raise_error_if_not_dataframe(self):\n pass\n\n\ndef test_rolling_zscore():\n np.random.seed(100)\n df = pd.DataFrame(np.random.randn(100,4), \n columns=['open', 'high', 'low', 'close'])\n df['symbol'] = list('ABCD') * 25\n dates = list(pd.date_range(end='2018-04-25', periods=25)) * 4\n df['timestamp'] = dates\n from fastbt.datasource import DataSource\n ds = DataSource(df)\n ds.add_rolling(on='close', window=5, function='zscore')\n assert ds.data.query('symbol==\"A\"').iloc[8]['rol_zscore_close_5'].round(2) == 0.12\n assert ds.data.query('symbol==\"B\"').iloc[-7]['rol_zscore_close_5'].round(2) == 0.17\n assert ds.data.query('symbol==\"C\"').iloc[-6]['rol_zscore_close_5'].round(2) == -0.48\n\nclass TestDataSourceReindex(unittest.TestCase):\n\n def setUp(self): \n df = pd.DataFrame(np.arange(24).reshape(6, 4),\n columns=['open', 'high', 'low', 'close'])\n df['symbol'] = list('ABCABA')\n df['timestamp'] = [1, 1, 1, 2, 3, 3]\n self.df = df\n\n def test_reindex(self):\n ds = DataSource(self.df)\n ds.reindex([1,2,3])\n assert len(ds.data) == 9\n # Check values\n assert ds.data.set_index(['symbol', 'timestamp']).at[('A', 1), 'open'] == 0\n assert ds.data.set_index(['symbol', 'timestamp']).at[('B', 2), 'close'] == 7\n assert ds.data.set_index(['symbol', 'timestamp']).at[('C', 3), 'high'] == 9\n ds.reindex([1,2,3,4])\n assert len(ds.data) == 12 \n\n def test_reindex_different_fills(self):\n ds = DataSource(self.df)\n ds.reindex([1,2,3], method=None)\n print(ds.data)\n assert pd.isnull(ds.data.set_index(['symbol', 'timestamp']).at[('C', 3), 'high'])\n ds = DataSource(self.df)\n ds.reindex([1,2,3,4], method='bfill') \n assert ds.data.set_index(['symbol', 'timestamp']).at[('B', 2), 'close'] == 19\n\nclass TestDataSourceTALIB(unittest.TestCase):\n\n \"\"\"\n Test TALIB indicators\n \"\"\"\n\n def setUp(self): \n self.df = pd.read_csv('tests/data/sample.csv', parse_dates=['timestamp'])\n\n def test_single_symbol(self):\n df = self.df.query('symbol==\"one\"')\n ds = DataSource(df)\n ds.add_indicator('SMA', period=3, col_name='sma')\n assert len(ds.data) == 6\n\n sma = talib.SMA(df.close.values, timeperiod=3)\n # If both are equal, there should be no differences\n assert (ds.data.sma - sma).sum() == 0\n\n\n\n" ]
[ [ "pandas.date_range", "pandas.read_csv", "numpy.random.seed", "numpy.random.randn", "numpy.arange", "pandas.isna" ] ]
OspreyData/lime
[ "ceec55cf074b6242ffdde3487afb08ab3250cd63" ]
[ "lime/lime_tabular.py" ]
[ "\"\"\"\nFunctions for explaining classifiers that use tabular data (matrices).\n\"\"\"\nimport collections\nimport copy\nfrom functools import partial\nimport json\nimport warnings\n\nimport numpy as np\nimport scipy as sp\nimport sklearn\nimport sklearn.preprocessing\nfrom sklearn.utils import check_random_state\n\nfrom lime.discretize import QuartileDiscretizer\nfrom lime.discretize import DecileDiscretizer\nfrom lime.discretize import EntropyDiscretizer\nfrom lime.discretize import BaseDiscretizer\nfrom lime.discretize import StatsDiscretizer\nfrom . import explanation\nfrom . import lime_base\n\n\nclass TableDomainMapper(explanation.DomainMapper):\n \"\"\"Maps feature ids to names, generates table views, etc\"\"\"\n\n def __init__(self, feature_names, feature_values, scaled_row,\n categorical_features, discretized_feature_names=None,\n feature_indexes=None):\n \"\"\"Init.\n\n Args:\n feature_names: list of feature names, in order\n feature_values: list of strings with the values of the original row\n scaled_row: scaled row\n categorical_features: list of categorical features ids (ints)\n feature_indexes: optional feature indexes used in the sparse case\n \"\"\"\n self.exp_feature_names = feature_names\n self.discretized_feature_names = discretized_feature_names\n self.feature_names = feature_names\n self.feature_values = feature_values\n self.feature_indexes = feature_indexes\n self.scaled_row = scaled_row\n if sp.sparse.issparse(scaled_row):\n self.all_categorical = False\n else:\n self.all_categorical = len(categorical_features) == len(scaled_row)\n self.categorical_features = categorical_features\n\n def map_exp_ids(self, exp):\n \"\"\"Maps ids to feature names.\n\n Args:\n exp: list of tuples [(id, weight), (id,weight)]\n\n Returns:\n list of tuples (feature_name, weight)\n \"\"\"\n names = self.exp_feature_names\n if self.discretized_feature_names is not None:\n names = self.discretized_feature_names\n return [(names[x[0]], x[1]) for x in exp]\n\n def visualize_instance_html(self,\n exp,\n label,\n div_name,\n exp_object_name,\n show_table=True,\n show_all=False):\n \"\"\"Shows the current example in a table format.\n\n Args:\n exp: list of tuples [(id, weight), (id,weight)]\n label: label id (integer)\n div_name: name of div object to be used for rendering(in js)\n exp_object_name: name of js explanation object\n show_table: if False, don't show table visualization.\n show_all: if True, show zero-weighted features in the table.\n \"\"\"\n if not show_table:\n return ''\n weights = [0] * len(self.feature_names)\n for x in exp:\n weights[x[0]] = x[1]\n if self.feature_indexes is not None:\n # Sparse case: only display the non-zero values and importances\n fnames = [self.exp_feature_names[i] for i in self.feature_indexes]\n fweights = [weights[i] for i in self.feature_indexes]\n if show_all:\n out_list = list(zip(fnames,\n self.feature_values,\n fweights))\n else:\n out_dict = dict(map(lambda x: (x[0], (x[1], x[2], x[3])),\n zip(self.feature_indexes,\n fnames,\n self.feature_values,\n fweights)))\n out_list = [out_dict.get(x[0], (str(x[0]), 0.0, 0.0)) for x in exp]\n else:\n out_list = list(zip(self.exp_feature_names,\n self.feature_values,\n weights))\n if not show_all:\n out_list = [out_list[x[0]] for x in exp]\n ret = u'''\n %s.show_raw_tabular(%s, %d, %s);\n ''' % (exp_object_name, json.dumps(out_list, ensure_ascii=False), label, div_name)\n return ret\n\n\nclass LimeTabularExplainer(object):\n \"\"\"Explains predictions on tabular (i.e. 
matrix) data.\n For numerical features, perturb them by sampling from a Normal(0,1) and\n doing the inverse operation of mean-centering and scaling, according to the\n means and stds in the training data. For categorical features, perturb by\n sampling according to the training distribution, and making a binary\n feature that is 1 when the value is the same as the instance being\n explained.\"\"\"\n\n def __init__(self,\n training_data,\n mode=\"classification\",\n training_labels=None,\n feature_names=None,\n categorical_features=None,\n categorical_names=None,\n kernel_width=None,\n kernel=None,\n verbose=False,\n class_names=None,\n feature_selection='auto',\n discretize_continuous=True,\n discretizer='quartile',\n sample_around_instance=False,\n random_state=None,\n training_data_stats=None):\n \"\"\"Init function.\n\n Args:\n training_data: numpy 2d array\n mode: \"classification\" or \"regression\"\n training_labels: labels for training data. Not required, but may be\n used by discretizer.\n feature_names: list of names (strings) corresponding to the columns\n in the training data.\n categorical_features: list of indices (ints) corresponding to the\n categorical columns. Everything else will be considered\n continuous. Values in these columns MUST be integers.\n categorical_names: map from int to list of names, where\n categorical_names[x][y] represents the name of the yth value of\n column x.\n kernel_width: kernel width for the exponential kernel.\n If None, defaults to sqrt (number of columns) * 0.75\n kernel: similarity kernel that takes euclidean distances and kernel\n width as input and outputs weights in (0,1). If None, defaults to\n an exponential kernel.\n verbose: if true, print local prediction values from linear model\n class_names: list of class names, ordered according to whatever the\n classifier is using. If not present, class names will be '0',\n '1', ...\n feature_selection: feature selection method. can be\n 'forward_selection', 'lasso_path', 'none' or 'auto'.\n See function 'explain_instance_with_data' in lime_base.py for\n details on what each of the options does.\n discretize_continuous: if True, all non-categorical features will\n be discretized into quartiles.\n discretizer: only matters if discretize_continuous is True\n and data is not sparse. Options are 'quartile', 'decile',\n 'entropy' or a BaseDiscretizer instance.\n sample_around_instance: if True, will sample continuous features\n in perturbed samples from a normal centered at the instance\n being explained. Otherwise, the normal is centered on the mean\n of the feature data.\n random_state: an integer or numpy.RandomState that will be used to\n generate random numbers. If None, the random state will be\n initialized using the internal numpy seed.\n training_data_stats: a dict object having the details of training data\n statistics. If None, training data information will be used, only matters\n if discretize_continuous is True. 
Must have the following keys:\n means\", \"mins\", \"maxs\", \"stds\", \"feature_values\",\n \"feature_frequencies\"\n \"\"\"\n self.random_state = check_random_state(random_state)\n self.mode = mode\n self.categorical_names = categorical_names or {}\n self.sample_around_instance = sample_around_instance\n self.training_data_stats = training_data_stats\n\n # Check and raise proper error in stats are supplied in non-descritized path\n if self.training_data_stats:\n self.validate_training_data_stats(self.training_data_stats)\n\n if categorical_features is None:\n categorical_features = []\n if feature_names is None:\n feature_names = [str(i) for i in range(training_data.shape[1])]\n\n self.categorical_features = list(categorical_features)\n self.feature_names = list(feature_names)\n\n self.discretizer = None\n if discretize_continuous and not sp.sparse.issparse(training_data):\n # Set the discretizer if training data stats are provided\n if self.training_data_stats:\n discretizer = StatsDiscretizer(training_data, self.categorical_features,\n self.feature_names, labels=training_labels,\n data_stats=self.training_data_stats,\n random_state=self.random_state)\n\n if discretizer == 'quartile':\n self.discretizer = QuartileDiscretizer(\n training_data, self.categorical_features,\n self.feature_names, labels=training_labels,\n random_state=self.random_state)\n elif discretizer == 'decile':\n self.discretizer = DecileDiscretizer(\n training_data, self.categorical_features,\n self.feature_names, labels=training_labels,\n random_state=self.random_state)\n elif discretizer == 'entropy':\n self.discretizer = EntropyDiscretizer(\n training_data, self.categorical_features,\n self.feature_names, labels=training_labels,\n random_state=self.random_state)\n elif isinstance(discretizer, BaseDiscretizer):\n self.discretizer = discretizer\n else:\n raise ValueError('''Discretizer must be 'quartile',''' +\n ''' 'decile', 'entropy' or a''' +\n ''' BaseDiscretizer instance''')\n self.categorical_features = list(range(training_data.shape[1]))\n\n # Get the discretized_training_data when the stats are not provided\n if(self.training_data_stats is None):\n discretized_training_data = self.discretizer.discretize(\n training_data)\n\n if kernel_width is None:\n kernel_width = np.sqrt(training_data.shape[1]) * .75\n kernel_width = float(kernel_width)\n\n if kernel is None:\n def kernel(d, kernel_width):\n return np.sqrt(np.exp(-(d ** 2) / kernel_width ** 2))\n\n kernel_fn = partial(kernel, kernel_width=kernel_width)\n\n self.feature_selection = feature_selection\n self.base = lime_base.LimeBase(kernel_fn, verbose, random_state=self.random_state)\n self.class_names = class_names\n\n # Though set has no role to play if training data stats are provided\n self.scaler = sklearn.preprocessing.StandardScaler(with_mean=False)\n self.scaler.fit(training_data)\n self.feature_values = {}\n self.feature_frequencies = {}\n\n for feature in self.categorical_features:\n if training_data_stats is None:\n if self.discretizer is not None:\n column = discretized_training_data[:, feature]\n else:\n column = training_data[:, feature]\n\n feature_count = collections.Counter(column)\n values, frequencies = map(list, zip(*(sorted(feature_count.items()))))\n else:\n values = training_data_stats[\"feature_values\"][feature]\n frequencies = training_data_stats[\"feature_frequencies\"][feature]\n\n self.feature_values[feature] = values\n self.feature_frequencies[feature] = (np.array(frequencies) /\n float(sum(frequencies)))\n 
self.scaler.mean_[feature] = 0\n self.scaler.scale_[feature] = 1\n\n @staticmethod\n def convert_and_round(values):\n return ['%.2f' % v for v in values]\n\n @staticmethod\n def validate_training_data_stats(training_data_stats):\n \"\"\"\n Method to validate the structure of training data stats\n \"\"\"\n stat_keys = list(training_data_stats.keys())\n valid_stat_keys = [\"means\", \"mins\", \"maxs\", \"stds\", \"feature_values\", \"feature_frequencies\"]\n missing_keys = list(set(valid_stat_keys) - set(stat_keys))\n if len(missing_keys) > 0:\n raise Exception(\"Missing keys in training_data_stats. Details: %s\" % (missing_keys))\n\n def explain_instance(self,\n data_row,\n predict_fn,\n labels=(1,),\n top_labels=None,\n num_features=10,\n num_samples=5000,\n distance_metric='euclidean',\n model_regressor=None):\n \"\"\"Generates explanations for a prediction.\n\n First, we generate neighborhood data by randomly perturbing features\n from the instance (see __data_inverse). We then learn locally weighted\n linear models on this neighborhood data to explain each of the classes\n in an interpretable way (see lime_base.py).\n\n Args:\n data_row: 1d numpy array or scipy.sparse matrix, corresponding to a row\n predict_fn: prediction function. For classifiers, this should be a\n function that takes a numpy array and outputs prediction\n probabilities. For regressors, this takes a numpy array and\n returns the predictions. For ScikitClassifiers, this is\n `classifier.predict_proba()`. For ScikitRegressors, this\n is `regressor.predict()`. The prediction function needs to work\n on multiple feature vectors (the vectors randomly perturbed\n from the data_row).\n labels: iterable with labels to be explained.\n top_labels: if not None, ignore labels and produce explanations for\n the K labels with highest prediction probabilities, where K is\n this parameter.\n num_features: maximum number of features present in explanation\n num_samples: size of the neighborhood to learn the linear model\n distance_metric: the distance metric to use for weights.\n model_regressor: sklearn regressor to use in explanation. Defaults\n to Ridge regression in LimeBase. Must have model_regressor.coef_\n and 'sample_weight' as a parameter to model_regressor.fit()\n\n Returns:\n An Explanation object (see explanation.py) with the corresponding\n explanations.\n \"\"\"\n if sp.sparse.issparse(data_row) and not sp.sparse.isspmatrix_csr(data_row):\n # Preventative code: if sparse, convert to csr format if not in csr format already\n data_row = data_row.tocsr()\n data, inverse = self.__data_inverse(data_row, num_samples)\n if sp.sparse.issparse(data):\n # Note in sparse case we don't subtract mean since data would become dense\n scaled_data = data.multiply(self.scaler.scale_)\n # Multiplying with csr matrix can return a coo sparse matrix\n if not sp.sparse.isspmatrix_csr(scaled_data):\n scaled_data = scaled_data.tocsr()\n else:\n scaled_data = (data - self.scaler.mean_) / self.scaler.scale_\n distances = sklearn.metrics.pairwise_distances(\n scaled_data,\n scaled_data[0].reshape(1, -1),\n metric=distance_metric\n ).ravel()\n\n yss = predict_fn(inverse)\n\n # for classification, the model needs to provide a list of tuples - classes\n # along with prediction probabilities\n if self.mode == \"classification\":\n if len(yss.shape) == 1:\n raise NotImplementedError(\"LIME does not currently support \"\n \"classifier models without probability \"\n \"scores. 
If this conflicts with your \"\n \"use case, please let us know: \"\n \"https://github.com/datascienceinc/lime/issues/16\")\n elif len(yss.shape) == 2:\n if self.class_names is None:\n self.class_names = [str(x) for x in range(yss[0].shape[0])]\n else:\n self.class_names = list(self.class_names)\n if not np.allclose(yss.sum(axis=1), 1.0):\n warnings.warn(\"\"\"\n Prediction probabilties do not sum to 1, and\n thus does not constitute a probability space.\n Check that you classifier outputs probabilities\n (Not log probabilities, or actual class predictions).\n \"\"\")\n else:\n raise ValueError(\"Your model outputs \"\n \"arrays with {} dimensions\".format(len(yss.shape)))\n\n # for regression, the output should be a one-dimensional array of predictions\n else:\n try:\n if len(yss.shape) != 1 and len(yss[0].shape) == 1:\n yss = np.array([v[0] for v in yss])\n assert isinstance(yss, np.ndarray) and len(yss.shape) == 1\n except AssertionError:\n raise ValueError(\"Your model needs to output single-dimensional \\\n numpyarrays, not arrays of {} dimensions\".format(yss.shape))\n\n predicted_value = yss[0]\n min_y = min(yss)\n max_y = max(yss)\n\n # add a dimension to be compatible with downstream machinery\n yss = yss[:, np.newaxis]\n\n feature_names = copy.deepcopy(self.feature_names)\n if feature_names is None:\n feature_names = [str(x) for x in range(data_row.shape[0])]\n\n if sp.sparse.issparse(data_row):\n values = self.convert_and_round(data_row.data)\n feature_indexes = data_row.indices\n else:\n values = self.convert_and_round(data_row)\n feature_indexes = None\n\n for i in self.categorical_features:\n if self.discretizer is not None and i in self.discretizer.lambdas:\n continue\n name = int(data_row[i])\n if i in self.categorical_names:\n name = self.categorical_names[i][name]\n feature_names[i] = '%s=%s' % (feature_names[i], name)\n values[i] = 'True'\n categorical_features = self.categorical_features\n\n discretized_feature_names = None\n if self.discretizer is not None:\n categorical_features = range(data.shape[1])\n discretized_instance = self.discretizer.discretize(data_row)\n discretized_feature_names = copy.deepcopy(feature_names)\n for f in self.discretizer.names:\n discretized_feature_names[f] = self.discretizer.names[f][int(\n discretized_instance[f])]\n\n domain_mapper = TableDomainMapper(feature_names,\n values,\n scaled_data[0],\n categorical_features=categorical_features,\n discretized_feature_names=discretized_feature_names,\n feature_indexes=feature_indexes)\n ret_exp = explanation.Explanation(domain_mapper,\n mode=self.mode,\n class_names=self.class_names)\n if self.mode == \"classification\":\n ret_exp.predict_proba = yss[0]\n if top_labels:\n labels = np.argsort(yss[0])[-top_labels:]\n ret_exp.top_labels = list(labels)\n ret_exp.top_labels.reverse()\n else:\n ret_exp.predicted_value = predicted_value\n ret_exp.min_value = min_y\n ret_exp.max_value = max_y\n labels = [0]\n for label in labels:\n (ret_exp.intercept[label],\n ret_exp.local_exp[label],\n ret_exp.score, ret_exp.local_pred) = self.base.explain_instance_with_data(\n scaled_data,\n yss,\n distances,\n label,\n num_features,\n model_regressor=model_regressor,\n feature_selection=self.feature_selection)\n\n if self.mode == \"regression\":\n ret_exp.intercept[1] = ret_exp.intercept[0]\n ret_exp.local_exp[1] = [x for x in ret_exp.local_exp[0]]\n ret_exp.local_exp[0] = [(i, -1 * j) for i, j in ret_exp.local_exp[1]]\n\n return ret_exp\n\n def __data_inverse(self,\n data_row,\n num_samples):\n \"\"\"Generates a 
neighborhood around a prediction.\n\n For numerical features, perturb them by sampling from a Normal(0,1) and\n doing the inverse operation of mean-centering and scaling, according to\n the means and stds in the training data. For categorical features,\n perturb by sampling according to the training distribution, and making\n a binary feature that is 1 when the value is the same as the instance\n being explained.\n\n Args:\n data_row: 1d numpy array, corresponding to a row\n num_samples: size of the neighborhood to learn the linear model\n\n Returns:\n A tuple (data, inverse), where:\n data: dense num_samples * K matrix, where categorical features\n are encoded with either 0 (not equal to the corresponding value\n in data_row) or 1. The first row is the original instance.\n inverse: same as data, except the categorical features are not\n binary, but categorical (as the original data)\n \"\"\"\n is_sparse = sp.sparse.issparse(data_row)\n if is_sparse:\n num_cols = data_row.shape[1]\n data = sp.sparse.csr_matrix((num_samples, num_cols), dtype=data_row.dtype)\n else:\n num_cols = data_row.shape[0]\n data = np.zeros((num_samples, num_cols))\n categorical_features = range(num_cols)\n if self.discretizer is None:\n instance_sample = data_row\n scale = self.scaler.scale_\n mean = self.scaler.mean_\n if is_sparse:\n # Perturb only the non-zero values\n non_zero_indexes = data_row.nonzero()[1]\n num_cols = len(non_zero_indexes)\n instance_sample = data_row[:, non_zero_indexes]\n scale = scale[non_zero_indexes]\n mean = mean[non_zero_indexes]\n data = self.random_state.normal(\n 0, 1, num_samples * num_cols).reshape(\n num_samples, num_cols)\n if self.sample_around_instance:\n data = data * scale + instance_sample\n else:\n data = data * scale + mean\n if is_sparse:\n if num_cols == 0:\n data = sp.sparse.csr_matrix((num_samples,\n data_row.shape[1]),\n dtype=data_row.dtype)\n else:\n indexes = np.tile(non_zero_indexes, num_samples)\n indptr = np.array(\n range(0, len(non_zero_indexes) * (num_samples + 1),\n len(non_zero_indexes)))\n data_1d_shape = data.shape[0] * data.shape[1]\n data_1d = data.reshape(data_1d_shape)\n data = sp.sparse.csr_matrix(\n (data_1d, indexes, indptr),\n shape=(num_samples, data_row.shape[1]))\n categorical_features = self.categorical_features\n first_row = data_row\n else:\n first_row = self.discretizer.discretize(data_row)\n data[0] = data_row.copy()\n inverse = data.copy()\n for column in categorical_features:\n values = self.feature_values[column]\n freqs = self.feature_frequencies[column]\n inverse_column = self.random_state.choice(values, size=num_samples,\n replace=True, p=freqs)\n binary_column = (inverse_column == first_row[column]).astype(int)\n binary_column[0] = 1\n inverse_column[0] = data[0, column]\n data[:, column] = binary_column\n inverse[:, column] = inverse_column\n if self.discretizer is not None:\n inverse[1:] = self.discretizer.undiscretize(inverse[1:])\n inverse[0] = data_row\n return data, inverse\n\n\nclass RecurrentTabularExplainer(LimeTabularExplainer):\n \"\"\"\n An explainer for keras-style recurrent neural networks, where the\n input shape is (n_samples, n_timesteps, n_features). 
This class\n just extends the LimeTabularExplainer class and reshapes the training\n data and feature names such that they become something like\n\n (val1_t1, val1_t2, val1_t3, ..., val2_t1, ..., valn_tn)\n\n Each of the methods that take data reshape it appropriately,\n so you can pass in the training/testing data exactly as you\n would to the recurrent neural network.\n\n \"\"\"\n\n def __init__(self, training_data, mode=\"classification\",\n training_labels=None, feature_names=None,\n categorical_features=None, categorical_names=None,\n kernel_width=None, kernel=None, verbose=False, class_names=None,\n feature_selection='auto', discretize_continuous=True,\n discretizer='quartile', random_state=None):\n \"\"\"\n Args:\n training_data: numpy 3d array with shape\n (n_samples, n_timesteps, n_features)\n mode: \"classification\" or \"regression\"\n training_labels: labels for training data. Not required, but may be\n used by discretizer.\n feature_names: list of names (strings) corresponding to the columns\n in the training data.\n categorical_features: list of indices (ints) corresponding to the\n categorical columns. Everything else will be considered\n continuous. Values in these columns MUST be integers.\n categorical_names: map from int to list of names, where\n categorical_names[x][y] represents the name of the yth value of\n column x.\n kernel_width: kernel width for the exponential kernel.\n If None, defaults to sqrt(number of columns) * 0.75\n kernel: similarity kernel that takes euclidean distances and kernel\n width as input and outputs weights in (0,1). If None, defaults to\n an exponential kernel.\n verbose: if true, print local prediction values from linear model\n class_names: list of class names, ordered according to whatever the\n classifier is using. If not present, class names will be '0',\n '1', ...\n feature_selection: feature selection method. can be\n 'forward_selection', 'lasso_path', 'none' or 'auto'.\n See function 'explain_instance_with_data' in lime_base.py for\n details on what each of the options does.\n discretize_continuous: if True, all non-categorical features will\n be discretized into quartiles.\n discretizer: only matters if discretize_continuous is True. Options\n are 'quartile', 'decile', 'entropy' or a BaseDiscretizer\n instance.\n random_state: an integer or numpy.RandomState that will be used to\n generate random numbers. 
If None, the random state will be\n initialized using the internal numpy seed.\n \"\"\"\n\n # Reshape X\n n_samples, n_timesteps, n_features = training_data.shape\n training_data = np.transpose(training_data, axes=(0, 2, 1)).reshape(\n n_samples, n_timesteps * n_features)\n self.n_timesteps = n_timesteps\n self.n_features = n_features\n\n # Update the feature names\n feature_names = ['{}_t-{}'.format(n, n_timesteps - (i + 1))\n for n in feature_names for i in range(n_timesteps)]\n\n # Send off the the super class to do its magic.\n super(RecurrentTabularExplainer, self).__init__(\n training_data,\n mode=mode,\n training_labels=training_labels,\n feature_names=feature_names,\n categorical_features=categorical_features,\n categorical_names=categorical_names,\n kernel_width=kernel_width,\n kernel=kernel,\n verbose=verbose,\n class_names=class_names,\n feature_selection=feature_selection,\n discretize_continuous=discretize_continuous,\n discretizer=discretizer,\n random_state=random_state)\n\n def _make_predict_proba(self, func):\n \"\"\"\n The predict_proba method will expect 3d arrays, but we are reshaping\n them to 2D so that LIME works correctly. This wraps the function\n you give in explain_instance to first reshape the data to have\n the shape the the keras-style network expects.\n \"\"\"\n\n def predict_proba(X):\n n_samples = X.shape[0]\n new_shape = (n_samples, self.n_features, self.n_timesteps)\n X = np.transpose(X.reshape(new_shape), axes=(0, 2, 1))\n return func(X)\n\n return predict_proba\n\n def explain_instance(self, data_row, classifier_fn, labels=(1,),\n top_labels=None, num_features=10, num_samples=5000,\n distance_metric='euclidean', model_regressor=None):\n \"\"\"Generates explanations for a prediction.\n\n First, we generate neighborhood data by randomly perturbing features\n from the instance (see __data_inverse). We then learn locally weighted\n linear models on this neighborhood data to explain each of the classes\n in an interpretable way (see lime_base.py).\n\n Args:\n data_row: 2d numpy array, corresponding to a row\n classifier_fn: classifier prediction probability function, which\n takes a numpy array and outputs prediction probabilities. For\n ScikitClassifiers , this is classifier.predict_proba.\n labels: iterable with labels to be explained.\n top_labels: if not None, ignore labels and produce explanations for\n the K labels with highest prediction probabilities, where K is\n this parameter.\n num_features: maximum number of features present in explanation\n num_samples: size of the neighborhood to learn the linear model\n distance_metric: the distance metric to use for weights.\n model_regressor: sklearn regressor to use in explanation. Defaults\n to Ridge regression in LimeBase. Must have\n model_regressor.coef_ and 'sample_weight' as a parameter\n to model_regressor.fit()\n\n Returns:\n An Explanation object (see explanation.py) with the corresponding\n explanations.\n \"\"\"\n\n # Flatten input so that the normal explainer can handle it\n data_row = data_row.T.reshape(self.n_timesteps * self.n_features)\n\n # Wrap the classifier to reshape input\n classifier_fn = self._make_predict_proba(classifier_fn)\n return super(RecurrentTabularExplainer, self).explain_instance(\n data_row, classifier_fn,\n labels=labels,\n top_labels=top_labels,\n num_features=num_features,\n num_samples=num_samples,\n distance_metric=distance_metric,\n model_regressor=model_regressor)\n" ]
[ [ "numpy.sqrt", "numpy.tile", "sklearn.utils.check_random_state", "numpy.transpose", "numpy.zeros", "scipy.sparse.issparse", "scipy.sparse.csr_matrix", "numpy.argsort", "numpy.exp", "scipy.sparse.isspmatrix_csr", "sklearn.preprocessing.StandardScaler", "numpy.array" ] ]
benwmcdowell/charge_density_methods_VASP
[ "c1d965b62e638e4509c8b2b94fc797568aa46919" ]
[ "charge_density_methods_VASP/2d_slice.py" ]
[ "from numpy import zeros, shape, dot\nfrom numpy.linalg import norm\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Patch\n\nfrom lib import parse_CHGCAR, parse_LOCPOT\n\ndef plot_2d_slice(ifile,pos,**args):\n if 'dim' in args:\n dim=args['dim']\n else:\n dim=2\n \n if 'filetype' in args:\n filetype=args['filetype']\n else:\n filetype='LOCPOT'\n \n if filetype=='LOCPOT':\n e,lv,coord,atomtypes,atomnums=parse_LOCPOT(ifile)\n else:\n e,lv,coord,atomtypes,atomnums=parse_CHGCAR(ifile)\n \n if 'ref' in args:\n for i in args['ref']:\n if filetype=='LOCPOT':\n tempvar=parse_LOCPOT(i)[0]\n else:\n tempvar=parse_CHGCAR(i)[0]\n e-=tempvar\n \n if 'direct' in args:\n pos=norm(dot(pos,lv[dim]))\n \n if 'tol' in args:\n tol=round(args['tol']/norm(lv[dim])*shape(e)[dim])\n else:\n tol=0\n \n plot_atoms=[]\n if 'overlay_atoms' in args:\n ranges=args['overlay_atoms']\n for i in range(sum(atomnums)):\n for j in range(3):\n if coord[i][j] > max(ranges[j]) or coord[i][j] < min(ranges[j]):\n break\n else:\n plot_atoms.append(i)\n if 'atom_sizes' in args:\n sizes=args['atom_sizes']\n else:\n sizes=[800 for i in range(len(atomnums))]\n \n if 'atom_colors' in args:\n colors=args['atom_colors']\n else:\n colors=['black' for i in range(len(atomnums))]\n \n pos_dim=[]\n for i in range(3):\n if i!=dim:\n pos_dim.append(i)\n \n xy=zeros((shape(e)[pos_dim[0]],shape(e)[pos_dim[1]],2))\n for i in range(len(xy)):\n for j in range(len(xy[i])):\n xy[i][j]+=lv[pos_dim[0]][:2]*i/(len(xy)+1)+lv[pos_dim[1]][:2]*j/(len(xy[i])+1)\n \n pos=round(pos*shape(e)[dim]/norm(lv[dim]))\n z=zeros((shape(e)[pos_dim[0]],shape(e)[pos_dim[1]]))\n for i in range(-tol,tol+1):\n if dim==0:\n z+=e[pos,:,:]/(2*tol+1)\n if dim==1:\n z+=e[:,pos,:]/(2*tol+1)\n if dim==2:\n z+=e[:,:,pos]/(2*tol+1)\n \n plt.figure()\n plt.pcolormesh(xy[:,:,0],xy[:,:,1],z,shading='nearest',cmap='jet')\n plt.colorbar()\n for i in plot_atoms:\n for j in range(len(atomtypes)):\n if i < sum(atomnums[:j+1]):\n break\n plt.scatter(coord[i][pos_dim[0]],coord[i][pos_dim[1]],color=colors[j],s=sizes[j])\n patches=[]\n if len(plot_atoms)>0:\n for i in range(len(atomtypes)):\n patches.append(Patch(color=colors[i],label=atomtypes[i]))\n \n plt.xlabel('position / $\\AA$')\n plt.ylabel('position / $\\AA$')\n plt.legend(handles=patches)\n plt.show()\n" ]
[ [ "matplotlib.patches.Patch", "matplotlib.pyplot.pcolormesh", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "numpy.shape", "matplotlib.pyplot.colorbar", "numpy.dot", "numpy.linalg.norm", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.scatter" ] ]
rickyHong/TensorRT-inference-server-repl
[ "024e6760d4efd2f1bbeb242d7a306851ccb5ea62" ]
[ "qa/common/gen_qa_sequence_models.py" ]
[ "# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of NVIDIA CORPORATION nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY\n# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY\n# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport argparse\nfrom builtins import range\nimport os\nimport sys\nimport numpy as np\nimport gen_ensemble_model_utils as emu\n\nFLAGS = None\nnp_dtype_string = np.dtype(object)\n\ndef np_to_model_dtype(np_dtype):\n if np_dtype == np.bool:\n return \"TYPE_BOOL\"\n elif np_dtype == np.int8:\n return \"TYPE_INT8\"\n elif np_dtype == np.int16:\n return \"TYPE_INT16\"\n elif np_dtype == np.int32:\n return \"TYPE_INT32\"\n elif np_dtype == np.int64:\n return \"TYPE_INT64\"\n elif np_dtype == np.uint8:\n return \"TYPE_UINT8\"\n elif np_dtype == np.uint16:\n return \"TYPE_UINT16\"\n elif np_dtype == np.float16:\n return \"TYPE_FP16\"\n elif np_dtype == np.float32:\n return \"TYPE_FP32\"\n elif np_dtype == np.float64:\n return \"TYPE_FP64\"\n elif np_dtype == np_dtype_string:\n return \"TYPE_STRING\"\n return None\n\ndef np_to_tf_dtype(np_dtype):\n if np_dtype == np.bool:\n return tf.bool\n elif np_dtype == np.int8:\n return tf.int8\n elif np_dtype == np.int16:\n return tf.int16\n elif np_dtype == np.int32:\n return tf.int32\n elif np_dtype == np.int64:\n return tf.int64\n elif np_dtype == np.uint8:\n return tf.uint8\n elif np_dtype == np.uint16:\n return tf.uint16\n elif np_dtype == np.float16:\n return tf.float16\n elif np_dtype == np.float32:\n return tf.float32\n elif np_dtype == np.float64:\n return tf.float64\n elif np_dtype == np_dtype_string:\n return tf.string\n return None\n\ndef np_to_c2_dtype(np_dtype):\n if np_dtype == np.bool:\n return c2core.DataType.BOOL\n elif np_dtype == np.int8:\n return c2core.DataType.INT8\n elif np_dtype == np.int16:\n return c2core.DataType.INT16\n elif np_dtype == np.int32:\n return c2core.DataType.INT32\n elif np_dtype == np.int64:\n return c2core.DataType.INT64\n elif np_dtype == np.uint8:\n return c2core.DataType.UINT8\n elif np_dtype == np.uint16:\n return c2core.DataType.UINT16\n elif np_dtype == np.float16:\n return c2core.DataType.FLOAT16\n elif np_dtype == np.float32:\n return c2core.DataType.FLOAT\n elif np_dtype == np.float64:\n 
return c2core.DataType.DOUBLE\n elif np_dtype == np_dtype_string:\n return c2core.DataType.STRING\n return None\n\ndef np_to_trt_dtype(np_dtype):\n if np_dtype == np.int8:\n return trt.infer.DataType.INT8\n elif np_dtype == np.int32:\n return trt.infer.DataType.INT32\n elif np_dtype == np.float16:\n return trt.infer.DataType.HALF\n elif np_dtype == np.float32:\n return trt.infer.DataType.FLOAT\n return None\n\ndef np_to_onnx_dtype(np_dtype):\n if np_dtype == np.bool:\n return onnx.TensorProto.BOOL\n elif np_dtype == np.int8:\n return onnx.TensorProto.INT8\n elif np_dtype == np.int16:\n return onnx.TensorProto.INT16\n elif np_dtype == np.int32:\n return onnx.TensorProto.INT32\n elif np_dtype == np.int64:\n return onnx.TensorProto.INT64\n elif np_dtype == np.uint8:\n return onnx.TensorProto.UINT8\n elif np_dtype == np.uint16:\n return onnx.TensorProto.UINT16\n elif np_dtype == np.float16:\n return onnx.TensorProto.FLOAT16\n elif np_dtype == np.float32:\n return onnx.TensorProto.FLOAT\n elif np_dtype == np.float64:\n return onnx.TensorProto.DOUBLE\n elif np_dtype == np_dtype_string:\n return onnx.TensorProto.STRING\n return None\n\ndef create_tf_modelfile(\n create_savedmodel, models_dir, model_version, max_batch, dtype, shape):\n\n if not tu.validate_for_tf_model(dtype, dtype, dtype, shape, shape, shape):\n return\n\n tf_input_dtype = np_to_tf_dtype(dtype)\n tf_dtype = tf_input_dtype\n\n # If the input is a string then use int32 for operation and just\n # cast to/from string for input and output.\n if tf_input_dtype == tf.string:\n tf_dtype = tf.int32\n\n # Create the model. If non-batching then don't include the batch\n # dimension.\n tf.reset_default_graph()\n if create_savedmodel and (max_batch == 0):\n input0 = tf.placeholder(tf_input_dtype, [1,], \"INPUT\")\n if tf_input_dtype == tf.string:\n input0 = tf.strings.to_number(tf.strings.join([\"0\", input0]), tf_dtype)\n start0 = tf.placeholder(tf_dtype, [1,], \"START\")\n ready0 = tf.placeholder(tf_dtype, [1,], \"READY\")\n acc = tf.get_variable(\"ACC\", [1,], dtype=tf_dtype)\n tmp = tf.where(tf.equal(start0, 1), input0, tf.add(acc, input0))\n newacc = tf.where(tf.equal(ready0, 1), tmp, acc)\n assign = tf.assign(acc, newacc)\n if tf_input_dtype == tf.string:\n output0 = tf.dtypes.as_string(assign, name=\"OUTPUT\")\n else:\n output0 = tf.identity(assign, name=\"OUTPUT\")\n else:\n # For batching we can't use a tf.variable to hold the\n # accumulated values since that forces the size of the output\n # to the size of the variable (which must be a max-batch-size\n # vector since require one accumulator each), instead of the\n # output shape being [None, 1]. So instead we just return 0 if\n # not-ready and 'INPUT'+'START' otherwise... 
the tests know to\n # expect this.\n input0 = tf.placeholder(tf_input_dtype, [None,] + tu.shape_to_tf_shape(shape), \"INPUT\")\n if tf_input_dtype == tf.string:\n input0 = tf.strings.to_number(tf.strings.join([\"0\", input0]), tf_dtype)\n start0 = tf.placeholder(tf_dtype, [None,1], \"START\")\n ready0 = tf.placeholder(tf_dtype, [None,1], \"READY\")\n tmp = tf.where(tf.equal(ready0, 1), tf.add(start0, input0),\n tf.zeros(tf.shape(input0), dtype=tf_dtype))\n if tf_input_dtype == tf.string:\n output0 = tf.dtypes.as_string(tmp, name=\"OUTPUT\")\n else:\n output0 = tf.identity(tmp, name=\"OUTPUT\")\n\n # Use a different model name for the non-batching variant\n if create_savedmodel:\n model_name = tu.get_sequence_model_name(\n \"savedmodel_nobatch\" if max_batch == 0 else \"savedmodel\", dtype)\n else:\n model_name = tu.get_sequence_model_name(\n \"graphdef_nobatch\" if max_batch == 0 else \"graphdef\", dtype)\n\n model_version_dir = models_dir + \"/\" + model_name + \"/\" + str(model_version)\n\n try:\n os.makedirs(model_version_dir)\n except OSError as ex:\n pass # ignore existing dir\n\n if create_savedmodel:\n with tf.Session() as sess:\n sess.run(tf.initializers.global_variables())\n input0_tensor = tf.get_default_graph().get_tensor_by_name(\"INPUT:0\")\n start0_tensor = tf.get_default_graph().get_tensor_by_name(\"START:0\")\n ready0_tensor = tf.get_default_graph().get_tensor_by_name(\"READY:0\")\n output0_tensor = tf.get_default_graph().get_tensor_by_name(\"OUTPUT:0\")\n tf.saved_model.simple_save(sess, model_version_dir + \"/model.savedmodel\",\n inputs={\"INPUT\": input0_tensor, \"START\": start0_tensor,\n \"READY\" : ready0_tensor},\n outputs={\"OUTPUT\": output0_tensor})\n else:\n with tf.Session() as sess:\n sess.run(tf.initializers.global_variables())\n graph_io.write_graph(sess.graph.as_graph_def(), model_version_dir,\n \"model.graphdef\", as_text=False)\n\ndef create_tf_modelconfig(\n create_savedmodel, models_dir, model_version, max_batch, dtype, shape):\n\n if not tu.validate_for_tf_model(dtype, dtype, dtype, shape, shape, shape):\n return\n\n # Use a different model name for the non-batching variant\n if create_savedmodel:\n model_name = tu.get_sequence_model_name(\n \"savedmodel_nobatch\" if max_batch == 0 else \"savedmodel\", dtype)\n else:\n model_name = tu.get_sequence_model_name(\n \"graphdef_nobatch\" if max_batch == 0 else \"graphdef\", dtype)\n\n config_dir = models_dir + \"/\" + model_name\n config = '''\nname: \"{}\"\nplatform: \"{}\"\nmax_batch_size: {}\nsequence_batching {{\n max_sequence_idle_microseconds: 5000000\n control_input [\n {{\n name: \"START\"\n control [\n {{\n kind: CONTROL_SEQUENCE_START\n {}_false_true: [ 0, 1 ]\n }}\n ]\n }},\n {{\n name: \"READY\"\n control [\n {{\n kind: CONTROL_SEQUENCE_READY\n {}_false_true: [ 0, 1 ]\n }}\n ]\n }}\n ]\n}}\ninput [\n {{\n name: \"INPUT\"\n data_type: {}\n dims: [ {} ]\n }}\n]\noutput [\n {{\n name: \"OUTPUT\"\n data_type: {}\n dims: [ 1 ]\n }}\n]\ninstance_group [\n {{\n kind: KIND_GPU\n }}\n]\n'''.format(model_name,\n \"tensorflow_savedmodel\" if create_savedmodel else \"tensorflow_graphdef\",\n max_batch,\n \"fp32\" if dtype == np.float32 else \"int32\",\n \"fp32\" if dtype == np.float32 else \"int32\",\n np_to_model_dtype(dtype), tu.shape_to_dims_str(shape),\n np_to_model_dtype(dtype))\n\n try:\n os.makedirs(config_dir)\n except OSError as ex:\n pass # ignore existing dir\n\n with open(config_dir + \"/config.pbtxt\", \"w\") as cfile:\n cfile.write(config)\n\n\ndef create_netdef_modelfile(\n models_dir, 
model_version, max_batch, dtype, shape):\n\n if not tu.validate_for_c2_model(dtype, dtype, dtype, shape, shape, shape):\n return\n\n c2_dtype = np_to_c2_dtype(dtype)\n model_name = tu.get_sequence_model_name(\n \"netdef_nobatch\" if max_batch == 0 else \"netdef\", dtype)\n\n # Create the model. For now don't implement a proper accumulator\n # just return 0 if not-ready and 'INPUT'+'START' otherwise... the\n # tests know to expect this.\n model = c2model_helper.ModelHelper(name=model_name)\n model.net.Add([\"INPUT\", \"START\"], \"add\")\n model.net.Sub([\"READY\", \"READY\"], \"zeros\")\n model.net.NE([\"READY\", \"zeros\"], \"compare\")\n model.net.Where([\"compare\", \"add\", \"zeros\"], \"OUTPUT\")\n\n model_version_dir = models_dir + \"/\" + model_name + \"/\" + str(model_version)\n\n try:\n os.makedirs(model_version_dir)\n except OSError as ex:\n pass # ignore existing dir\n\n with open(model_version_dir + \"/model.netdef\", \"wb\") as f:\n f.write(model.Proto().SerializeToString())\n with open(model_version_dir + \"/init_model.netdef\", \"wb\") as f:\n f.write(model.InitProto().SerializeToString())\n\n\ndef create_netdef_modelconfig(\n models_dir, model_version, max_batch, dtype, shape):\n\n if not tu.validate_for_c2_model(dtype, dtype, dtype, shape, shape, shape):\n return\n\n model_name = tu.get_sequence_model_name(\n \"netdef_nobatch\" if max_batch == 0 else \"netdef\", dtype)\n config_dir = models_dir + \"/\" + model_name\n config = '''\nname: \"{}\"\nplatform: \"caffe2_netdef\"\nmax_batch_size: {}\nsequence_batching {{\n max_sequence_idle_microseconds: 5000000\n control_input [\n {{\n name: \"START\"\n control [\n {{\n kind: CONTROL_SEQUENCE_START\n {}_false_true: [ 0, 1 ]\n }}\n ]\n }},\n {{\n name: \"READY\"\n control [\n {{\n kind: CONTROL_SEQUENCE_READY\n {}_false_true: [ 0, 1 ]\n }}\n ]\n }}\n ]\n}}\ninput [\n {{\n name: \"INPUT\"\n data_type: {}\n dims: [ {} ]\n }}\n]\noutput [\n {{\n name: \"OUTPUT\"\n data_type: {}\n dims: [ 1 ]\n }}\n]\ninstance_group [\n {{\n kind: KIND_CPU\n }}\n]\n'''.format(model_name, max_batch,\n \"int32\" if dtype == np.int32 else \"fp32\",\n \"int32\" if dtype == np.int32 else \"fp32\",\n np_to_model_dtype(dtype), tu.shape_to_dims_str(shape),\n np_to_model_dtype(dtype))\n\n try:\n os.makedirs(config_dir)\n except OSError as ex:\n pass # ignore existing dir\n\n with open(config_dir + \"/config.pbtxt\", \"w\") as cfile:\n cfile.write(config)\n\n\ndef create_plan_modelfile(\n models_dir, model_version, max_batch, dtype, shape):\n\n if not tu.validate_for_trt_model(dtype, dtype, dtype, shape, shape, shape):\n return\n\n trt_dtype = np_to_trt_dtype(dtype)\n\n # Create the model. For now don't implement a proper accumulator\n # just return 0 if not-ready and 'INPUT'+'START' otherwise... 
the\n # tests know to expect this.\n G_LOGGER = trt.infer.ConsoleLogger(trt.infer.LogSeverity.INFO)\n builder = trt.infer.create_infer_builder(G_LOGGER)\n network = builder.create_network()\n in0 = network.add_input(\"INPUT\", trt_dtype, shape)\n start0 = network.add_input(\"START\", trt_dtype, [1, 1, 1])\n ready0 = network.add_input(\"READY\", trt_dtype, [1, 1, 1])\n add = network.add_elementwise(in0, start0, trt.infer.ElementWiseOperation.SUM)\n out0 = network.add_elementwise(add.get_output(0), ready0, trt.infer.ElementWiseOperation.PROD)\n\n out0.get_output(0).set_name(\"OUTPUT\")\n network.mark_output(out0.get_output(0))\n\n builder.set_max_batch_size(max(1, max_batch))\n builder.set_max_workspace_size(1 << 20)\n engine = builder.build_cuda_engine(network)\n network.destroy()\n\n model_name = tu.get_sequence_model_name(\n \"plan_nobatch\" if max_batch == 0 else \"plan\", dtype)\n model_version_dir = models_dir + \"/\" + model_name + \"/\" + str(model_version)\n\n try:\n os.makedirs(model_version_dir)\n except OSError as ex:\n pass # ignore existing dir\n\n lengine = trt.lite.Engine(engine_stream=engine.serialize(),\n max_batch_size=max(1, max_batch))\n lengine.save(model_version_dir + \"/model.plan\")\n engine.destroy()\n builder.destroy()\n\n\ndef create_plan_modelconfig(\n models_dir, model_version, max_batch, dtype, shape):\n\n if not tu.validate_for_trt_model(dtype, dtype, dtype, shape, shape, shape):\n return\n\n model_name = tu.get_sequence_model_name(\n \"plan_nobatch\" if max_batch == 0 else \"plan\", dtype)\n config_dir = models_dir + \"/\" + model_name\n config = '''\nname: \"{}\"\nplatform: \"tensorrt_plan\"\nmax_batch_size: {}\nsequence_batching {{\n max_sequence_idle_microseconds: 5000000\n control_input [\n {{\n name: \"START\"\n control [\n {{\n kind: CONTROL_SEQUENCE_START\n {}_false_true: [ 0, 1 ]\n }}\n ]\n }},\n {{\n name: \"READY\"\n control [\n {{\n kind: CONTROL_SEQUENCE_READY\n {}_false_true: [ 0, 1 ]\n }}\n ]\n }}\n ]\n}}\ninput [\n {{\n name: \"INPUT\"\n data_type: {}\n dims: [ {} ]\n }}\n]\noutput [\n {{\n name: \"OUTPUT\"\n data_type: {}\n dims: [ 1, 1, 1 ]\n }}\n]\ninstance_group [\n {{\n kind: KIND_GPU\n }}\n]\n'''.format(model_name, max_batch,\n \"int32\" if dtype == np.int32 else \"fp32\",\n \"int32\" if dtype == np.int32 else \"fp32\",\n np_to_model_dtype(dtype), tu.shape_to_dims_str(shape),\n np_to_model_dtype(dtype))\n\n try:\n os.makedirs(config_dir)\n except OSError as ex:\n pass # ignore existing dir\n\n with open(config_dir + \"/config.pbtxt\", \"w\") as cfile:\n cfile.write(config)\n\ndef create_onnx_modelfile(\n models_dir, model_version, max_batch, dtype, shape):\n\n if not tu.validate_for_onnx_model(dtype, dtype, dtype, shape, shape, shape):\n return\n\n model_name = tu.get_sequence_model_name(\n \"onnx_nobatch\" if max_batch == 0 else \"onnx\", dtype)\n model_version_dir = models_dir + \"/\" + model_name + \"/\" + str(model_version)\n\n # Create the model. For now don't implement a proper accumulator\n # just return 0 if not-ready and 'INPUT'+'START' otherwise... 
the\n # tests know to expect this.\n onnx_dtype = np_to_onnx_dtype(dtype)\n onnx_input_shape, idx = tu.shape_to_onnx_shape(shape, 0)\n onnx_start_shape, idx = tu.shape_to_onnx_shape(shape, idx)\n onnx_ready_shape, idx = tu.shape_to_onnx_shape(shape, idx)\n onnx_output_shape, idx = tu.shape_to_onnx_shape(shape, idx)\n\n # If the input is a string then use int32 for operation and just\n # cast to/from string for input and output.\n onnx_control_dtype = onnx_dtype\n if onnx_dtype == onnx.TensorProto.STRING:\n onnx_control_dtype = onnx.TensorProto.INT32\n\n batch_dim = [] if max_batch == 0 else [max_batch]\n\n onnx_input = onnx.helper.make_tensor_value_info(\"INPUT\", onnx_dtype, batch_dim + onnx_input_shape)\n onnx_start = onnx.helper.make_tensor_value_info(\"START\", onnx_control_dtype, batch_dim + onnx_start_shape)\n onnx_ready = onnx.helper.make_tensor_value_info(\"READY\", onnx_control_dtype, batch_dim + onnx_ready_shape)\n onnx_output = onnx.helper.make_tensor_value_info(\"OUTPUT\", onnx_dtype, batch_dim + onnx_output_shape)\n\n internal_input = onnx.helper.make_node(\"Identity\", [\"INPUT\"], [\"_INPUT\"])\n\n # cast int8, int16 input to higher precision int as Onnx Add/Sub operator doesn't support those types\n # Also casting String data type to int32\n if ((onnx_dtype == onnx.TensorProto.INT8) or (onnx_dtype == onnx.TensorProto.INT16) or\n (onnx_dtype == onnx.TensorProto.STRING)):\n internal_input = onnx.helper.make_node(\"Cast\", [\"INPUT\"], [\"_INPUT\"], to=onnx.TensorProto.INT32)\n\n add = onnx.helper.make_node(\"Add\", [\"_INPUT\", \"START\"], [\"add\"])\n # Take advantage of knowledge that the READY false value is 0 and true is 1\n mul = onnx.helper.make_node(\"Mul\", [\"READY\", \"add\"], [\"CAST\"])\n cast = onnx.helper.make_node(\"Cast\", [\"CAST\"], [\"OUTPUT\"], to=onnx_dtype)\n\n # Avoid cast from float16 to float16\n # (bug in Onnx Runtime, cast from float16 to float16 will become cast from float16 to float32)\n if onnx_dtype == onnx.TensorProto.FLOAT16:\n cast = onnx.helper.make_node(\"Identity\", [\"CAST\"], [\"OUTPUT\"])\n\n onnx_nodes = [internal_input, add, mul, cast]\n onnx_inputs = [onnx_input, onnx_start, onnx_ready]\n onnx_outputs = [onnx_output]\n\n graph_proto = onnx.helper.make_graph(onnx_nodes, model_name, onnx_inputs, onnx_outputs)\n model_def = onnx.helper.make_model(graph_proto, producer_name=\"TRTIS\")\n\n try:\n os.makedirs(model_version_dir)\n except OSError as ex:\n pass # ignore existing dir\n\n onnx.save(model_def, model_version_dir + \"/model.onnx\")\n\n\ndef create_onnx_modelconfig(\n models_dir, model_version, max_batch, dtype, shape):\n\n if not tu.validate_for_onnx_model(dtype, dtype, dtype, shape, shape, shape):\n return\n\n model_name = tu.get_sequence_model_name(\n \"onnx_nobatch\" if max_batch == 0 else \"onnx\", dtype)\n config_dir = models_dir + \"/\" + model_name\n\n # Must make sure all Onnx models will be loaded to the same GPU if they are\n # run on GPU. 
This is due to the current limitation of Onnx Runtime\n # https://github.com/microsoft/onnxruntime/issues/1034\n instance_group_string = '''\ninstance_group [\n {\n kind: KIND_GPU\n gpus: [ 0 ]\n }\n]\n'''\n # [TODO] move create_general_modelconfig() out of emu as it is general\n # enough for all backends to use\n config = emu.create_general_modelconfig(model_name, \"onnxruntime_onnx\", max_batch,\n [dtype], [shape], [None], [dtype], [shape], [None], [None],\n force_tensor_number_suffix=False, instance_group_str=instance_group_string)\n\n config += '''\nsequence_batching {{\n max_sequence_idle_microseconds: 5000000\n control_input [\n {{\n name: \"START\"\n control [\n {{\n kind: CONTROL_SEQUENCE_START\n {type}_false_true: [ 0, 1 ]\n }}\n ]\n }},\n {{\n name: \"READY\"\n control [\n {{\n kind: CONTROL_SEQUENCE_READY\n {type}_false_true: [ 0, 1 ]\n }}\n ]\n }}\n ]\n}}\n'''.format(type=\"fp32\" if dtype == np.float32 else \"int32\")\n\n try:\n os.makedirs(config_dir)\n except OSError as ex:\n pass # ignore existing dir\n\n with open(config_dir + \"/config.pbtxt\", \"w\") as cfile:\n cfile.write(config)\n\n\ndef create_models(models_dir, dtype, shape, no_batch=True):\n model_version = 1\n\n if FLAGS.graphdef:\n create_tf_modelconfig(False, models_dir, model_version, 8, dtype, shape)\n create_tf_modelfile(False, models_dir, model_version, 8, dtype, shape)\n if no_batch:\n create_tf_modelconfig(False, models_dir, model_version, 0, dtype, shape)\n create_tf_modelfile(False, models_dir, model_version, 0, dtype, shape)\n\n if FLAGS.savedmodel:\n create_tf_modelconfig(True, models_dir, model_version, 8, dtype, shape)\n create_tf_modelfile(True, models_dir, model_version, 8, dtype, shape)\n if no_batch:\n create_tf_modelconfig(True, models_dir, model_version, 0, dtype, shape)\n create_tf_modelfile(True, models_dir, model_version, 0, dtype, shape)\n\n if FLAGS.netdef:\n create_netdef_modelconfig(models_dir, model_version, 8, dtype, shape)\n create_netdef_modelfile(models_dir, model_version, 8, dtype, shape)\n if no_batch:\n create_netdef_modelconfig(models_dir, model_version, 0, dtype, shape)\n create_netdef_modelfile(models_dir, model_version, 0, dtype, shape)\n\n if FLAGS.tensorrt:\n create_plan_modelconfig(models_dir, model_version, 8, dtype, shape + [1, 1])\n create_plan_modelfile(models_dir, model_version, 8, dtype, shape + [1, 1])\n if no_batch:\n create_plan_modelconfig(models_dir, model_version, 0, dtype, shape + [1, 1])\n create_plan_modelfile(models_dir, model_version, 0, dtype, shape + [1, 1])\n\n if FLAGS.onnx:\n create_onnx_modelconfig(models_dir, model_version, 8, dtype, shape)\n create_onnx_modelfile(models_dir, model_version, 8, dtype, shape)\n if no_batch:\n create_onnx_modelconfig(models_dir, model_version, 0, dtype, shape)\n create_onnx_modelfile(models_dir, model_version, 0, dtype, shape)\n\n if FLAGS.ensemble:\n for pair in emu.platform_types_and_validation():\n if pair[0] == \"plan\":\n shape = shape + [1, 1]\n if not pair[1](dtype, dtype, dtype,\n shape, shape, shape):\n continue\n\n emu.create_sequence_ensemble_modelconfig(\n pair[0], models_dir, 8, model_version, shape, dtype)\n emu.create_sequence_ensemble_modelfile(\n pair[0], models_dir, 8, model_version, shape, dtype)\n if no_batch:\n emu.create_sequence_ensemble_modelconfig(\n pair[0], models_dir, 0, model_version, shape, dtype)\n emu.create_sequence_ensemble_modelfile(\n pair[0], models_dir, 0, model_version, shape, dtype)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n 
parser.add_argument('--models_dir', type=str, required=True,\n help='Top-level model directory')\n parser.add_argument('--graphdef', required=False, action='store_true',\n help='Generate GraphDef models')\n parser.add_argument('--savedmodel', required=False, action='store_true',\n help='Generate SavedModel models')\n parser.add_argument('--netdef', required=False, action='store_true',\n help='Generate NetDef models')\n parser.add_argument('--tensorrt', required=False, action='store_true',\n help='Generate TensorRT PLAN models')\n parser.add_argument('--onnx', required=False, action='store_true',\n help='Generate Onnx models')\n parser.add_argument('--variable', required=False, action='store_true',\n help='Use variable-shape tensors for input/output')\n parser.add_argument('--ensemble', required=False, action='store_true',\n help='Generate ensemble models against the models'\n + ' in all platforms. Note that the models generated'\n + ' are not complete.')\n FLAGS, unparsed = parser.parse_known_args()\n\n if FLAGS.netdef:\n from caffe2.python import core as c2core\n from caffe2.python import model_helper as c2model_helper\n if FLAGS.graphdef or FLAGS.savedmodel:\n import tensorflow as tf\n from tensorflow.python.framework import graph_io, graph_util\n if FLAGS.tensorrt:\n import tensorrt.legacy as trt\n if FLAGS.onnx:\n import onnx\n\n import test_util as tu\n\n # Tests with models that accept fixed-shape input/output tensors\n if not FLAGS.variable:\n create_models(FLAGS.models_dir, np.float32, [1,])\n create_models(FLAGS.models_dir, np.int32, [1,])\n create_models(FLAGS.models_dir, np_dtype_string, [1,])\n\n # Tests with models that accept variable-shape input/output tensors\n if FLAGS.variable:\n create_models(FLAGS.models_dir, np.int32, [-1,], False)\n create_models(FLAGS.models_dir, np.float32, [-1,], False)\n create_models(FLAGS.models_dir, np_dtype_string, [-1,], False)\n\n if FLAGS.ensemble:\n # Create nop models used in ensemble\n for model_dtype in [\"TYPE_INT32\", \"TYPE_FP32\"]:\n # 3D shape for TensorRT Plan\n for model_shape in [(-1,), (-1, -1, -1)]:\n emu.create_nop_modelconfig(FLAGS.models_dir, model_shape, model_dtype)\n" ]
[ [ "tensorflow.strings.join", "tensorflow.placeholder", "tensorflow.equal", "tensorflow.shape", "tensorflow.initializers.global_variables", "numpy.dtype", "tensorflow.add", "tensorflow.assign", "tensorflow.Session", "tensorflow.saved_model.simple_save", "tensorflow.identity", "tensorflow.get_default_graph", "tensorflow.reset_default_graph", "tensorflow.get_variable", "tensorflow.dtypes.as_string" ] ]
Rhcsky/cifar100-classification
[ "00a099b608d798f59f1781375687e10e7fd3a250" ]
[ "model/resnet.py" ]
[ "# Original code: https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py\n\nimport torch.nn as nn\nimport math\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"3x3 convolution with padding\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * Bottleneck.expansion, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * Bottleneck.expansion)\n self.relu = nn.ReLU(inplace=True)\n\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n def __init__(self, depth, num_classes, bottleneck=False):\n super(ResNet, self).__init__()\n self.inplanes = 16\n\n if bottleneck:\n n = int((depth - 2) / 9)\n block = Bottleneck\n else:\n n = int((depth - 2) / 6)\n block = BasicBlock\n\n self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(self.inplanes)\n self.relu = nn.ReLU(inplace=True)\n self.layer1 = self._make_layer(block, 16, n)\n self.layer2 = self._make_layer(block, 32, n, stride=2)\n self.layer3 = self._make_layer(block, 64, n, stride=2)\n self.avgpool = nn.AvgPool2d(8)\n self.fc = nn.Linear(64 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\n\nif __name__ == '__main__':\n import torch\n\n img = torch.rand((1, 3, 32, 32))\n model = ResNet(20, 100, False)\n\n print(model)\n" ]
[ [ "torch.nn.BatchNorm2d", "torch.nn.Linear", "torch.rand", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.nn.AvgPool2d", "torch.nn.ReLU" ] ]
KushDen/deepimportance_code_release
[ "5d16f1f95568dc402be6dfed4ad993ec0dbaa356" ]
[ "lrp_toolbox/modules/softmax.py" ]
[ "'''\n@author: Sebastian Lapuschkin\n@author: Gregoire Montavon\n@maintainer: Sebastian Lapuschkin\n@contact: [email protected], [email protected]\n@date: 14.08.2015\n@version: 1.2+\n@copyright: Copyright (c) 2015-2017, Sebastian Lapuschkin, Alexander Binder, Gregoire Montavon, Klaus-Robert Mueller, Wojciech Samek\n@license : BSD-2-Clause\n'''\n\nimport numpy as np\nfrom .module import Module\n\n# -------------------------------\n# Softmax layer\n# -------------------------------\nclass SoftMax(Module):\n '''\n Softmax Layer\n '''\n\n def __init__(self):\n Module.__init__(self)\n\n def forward(self,X,*args,**kwargs):\n self.X = X\n self.Y = np.exp(X) / np.exp(X).sum(axis=1,keepdims=True)\n return self.Y\n\n\n def lrp(self,R,*args,**kwargs):\n # just propagate R further down.\n # makes sure subroutines never get called.\n #return R*self.X\n return R\n\n def clean(self):\n self.X = None\n self.Y = None" ]
[ [ "numpy.exp" ] ]
jhaux/triplet-reid
[ "ac475c38c1de083482634db75dde53f12ef69cb1" ]
[ "triplet_reid/edflow_implementations/deepfashion/eval_tsne.py" ]
[ "import sys\nsys.path.append(\".\")\nimport yaml, os, json\nfrom triplet_reid.edflow_implementations.deepfashion.data import (\n FromCSVWithEmbedding, FromCSVWithMultiEmbedding)\nfrom tqdm import trange, tqdm\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nfrom triplet_reid.excluders.diagonal import Excluder as DiagonalExcluder\nfrom scipy.spatial.distance import cdist\nfrom sklearn.metrics import average_precision_score\n\n\ndef make_tsne_plot(outpath, dataset):\n indices = np.random.permutation(len(dataset))\n N = 1000\n indices = indices[:N]\n data = list()\n for i in tqdm(indices):\n data.append(dataset[i][\"embedding\"])\n data = np.stack(data)\n\n from sklearn.manifold import TSNE\n tsne = TSNE(n_components=2, random_state=0, verbose = 1, perplexity = 40, n_iter=300)\n data_2d = tsne.fit_transform(data)\n\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n ax.scatter(data_2d[:,0], data_2d[:,1])\n\n fig.savefig(outpath, dpi = 300)\n print(\"Wrote \", outpath)\n\ndef make_combined_tsne_plot(outpath, dataset1, dataset2, label1, label2):\n indices1 = np.random.permutation(len(dataset1))\n indices2 = np.random.permutation(len(dataset2))\n N = 1000\n indices1 = indices1[:N]\n indices2 = indices2[:N]\n data = list()\n for i in tqdm(indices1):\n data.append(dataset1[i][\"embedding\"])\n for i in tqdm(indices2):\n data.append(dataset2[i][\"embedding\"])\n data = np.stack(data)\n print(data.shape)\n\n from sklearn.manifold import TSNE\n tsne = TSNE(n_components=2, random_state=0, verbose = 1)\n data_2d = tsne.fit_transform(data)\n print(data_2d.shape)\n\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n colors = [\"r\", \"g\"]\n markers = [\"+\", \"x\"]\n alphas = [1.0, 1.0]\n ax.scatter(\n data_2d[:N,0], data_2d[:N,1],\n c = colors[0], label = label1, marker = markers[0], alpha = alphas[0])\n ax.scatter(\n data_2d[N:,0], data_2d[N:,1],\n c = colors[1], label = label2, marker = markers[1], alpha = alphas[1])\n ax.legend()\n\n fig.savefig(outpath, dpi = 300)\n print(\"Wrote \", outpath)\n\n\ndef run(embedding_root, postfixes):\n joint_config = {\n \"spatial_size\": 256,\n \"data_root\": \"data/deepfashion/images\",\n \"embedding_root\": embedding_root,\n \"embedding_postfixes\": postfixes,\n \"data_csv\": \"data/deepfashion/test_reconstruction.csv\",\n \"z_size\": None}\n joint_dataset = FromCSVWithMultiEmbedding(joint_config)\n marginal_config = {\n \"spatial_size\": 256,\n \"data_root\": \"data/deepfashion/images\",\n \"embedding_root\": embedding_root,\n \"embedding_postfixes\": postfixes,\n \"data_csv\": \"data/deepfashion/test_transfer.csv\",\n \"z_size\": None}\n marginal_dataset = FromCSVWithMultiEmbedding(marginal_config)\n print(len(joint_dataset))\n print(len(marginal_dataset))\n for name, dataset in zip([\"joint\", \"marginal\"], [joint_dataset, marginal_dataset]):\n out_path = \"tsne_\" + name + \".png\"\n out_path = os.path.join(embedding_root, out_path)\n make_tsne_plot(out_path, dataset)\n\n out_path = \"tsne_\" + \"combined\" + \".png\"\n out_path = os.path.join(embedding_root, out_path)\n make_combined_tsne_plot(out_path, joint_dataset, marginal_dataset, \"joint\", \"marginal\")\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"embedding_root\")\n parser.add_argument(\"--postfixes\", nargs = \"+\", required = True)\n opt = parser.parse_args()\n run(opt.embedding_root, opt.postfixes)\n" ]
[ [ "numpy.stack", "sklearn.manifold.TSNE", "matplotlib.pyplot.figure" ] ]
iam-abbas/numpy
[ "2fb5e969fded3cd468f2ca01d5b954c953545dd9" ]
[ "benchmarks/benchmarks/bench_lib.py" ]
[ "\"\"\"Benchmarks for `numpy.lib`.\"\"\"\n\n\nfrom .common import Benchmark\n\nimport numpy as np\n\n\nclass Pad(Benchmark):\n \"\"\"Benchmarks for `numpy.pad`.\n\n When benchmarking the pad function it is useful to cover scenarios where\n the ratio between the size of the input array and the output array differs\n significantly (original area vs. padded area). This allows evaluating for\n which scenario a padding algorithm is optimized. Furthermore, involving a\n large range of array sizes ensures that the effects of CPU-bound caching are\n visible.\n\n The table below shows the sizes of the arrays involved in this benchmark:\n\n +-----------------+----------+-----------+-----------+-----------------+\n | shape | original | padded: 1 | padded: 8 | padded: (0, 32) |\n +=================+==========+===========+===========+=================+\n | (2 ** 22,) | 32 MiB | 32.0 MiB | 32.0 MiB | 32.0 MiB |\n +-----------------+----------+-----------+-----------+-----------------+\n | (1024, 1024) | 8 MiB | 8.03 MiB | 8.25 MiB | 8.51 MiB |\n +-----------------+----------+-----------+-----------+-----------------+\n | (256, 128, 1) | 256 KiB | 786 KiB | 5.08 MiB | 11.6 MiB |\n +-----------------+----------+-----------+-----------+-----------------+\n | (4, 4, 4, 4) | 2 KiB | 10.1 KiB | 1.22 MiB | 12.8 MiB |\n +-----------------+----------+-----------+-----------+-----------------+\n | (1, 1, 1, 1, 1) | 8 B | 1.90 MiB | 10.8 MiB | 299 MiB |\n +-----------------+----------+-----------+-----------+-----------------+\n \"\"\"\n\n param_names = [\"shape\", \"pad_width\", \"mode\"]\n params = [\n # Shape of the input arrays\n [(2 ** 22,), (1024, 1024), (256, 128, 1),\n (4, 4, 4, 4), (1, 1, 1, 1, 1)],\n # Tested pad widths\n [1, 8, (0, 32)],\n # Tested modes: mean, median, minimum & maximum use the same code path\n # reflect & symmetric share a lot of their code path\n [\"constant\", \"edge\", \"linear_ramp\", \"mean\", \"reflect\", \"wrap\"],\n ]\n\n def setup(self, shape, pad_width, mode):\n # Make sure to fill the array to make the OS page fault\n # in the setup phase and not the timed phase\n self.array = np.full(shape, fill_value=1, dtype=np.float64)\n\n def time_pad(self, shape, pad_width, mode):\n np.pad(self.array, pad_width, mode)\n\n\nclass Nan(Benchmark):\n \"\"\"Benchmarks for nan functions\"\"\"\n\n param_names = [\"array_size\", \"percent_nans\"]\n params = [\n # sizes of the 1D arrays\n [200, int(2e5)],\n # percent of np.nan in arrays\n [0, 0.1, 2., 50., 90.],\n ]\n\n def setup(self, array_size, percent_nans):\n np.random.seed(123)\n # produce a randomly shuffled array with the\n # approximate desired percentage np.nan content\n base_array = np.random.uniform(size=array_size)\n base_array[base_array < percent_nans / 100.] 
= np.nan\n self.arr = base_array\n\n def time_nanmin(self, array_size, percent_nans):\n np.nanmin(self.arr)\n\n def time_nanmax(self, array_size, percent_nans):\n np.nanmax(self.arr)\n\n def time_nanargmin(self, array_size, percent_nans):\n np.nanargmin(self.arr)\n\n def time_nanargmax(self, array_size, percent_nans):\n np.nanargmax(self.arr)\n\n def time_nansum(self, array_size, percent_nans):\n np.nansum(self.arr)\n\n def time_nanprod(self, array_size, percent_nans):\n np.nanprod(self.arr)\n\n def time_nancumsum(self, array_size, percent_nans):\n np.nancumsum(self.arr)\n\n def time_nancumprod(self, array_size, percent_nans):\n np.nancumprod(self.arr)\n\n def time_nanmean(self, array_size, percent_nans):\n np.nanmean(self.arr)\n\n def time_nanvar(self, array_size, percent_nans):\n np.nanvar(self.arr)\n\n def time_nanstd(self, array_size, percent_nans):\n np.nanstd(self.arr)\n\n def time_nanmedian(self, array_size, percent_nans):\n np.nanmedian(self.arr)\n\n def time_nanquantile(self, array_size, percent_nans):\n np.nanquantile(self.arr, q=0.2)\n\n def time_nanpercentile(self, array_size, percent_nans):\n np.nanpercentile(self.arr, q=50)\n\n\nclass Unique(Benchmark):\n \"\"\"Benchmark for np.unique with np.nan values.\"\"\"\n\n param_names = [\"array_size\", \"percent_nans\"]\n params = [\n # sizes of the 1D arrays\n [200, int(2e5)],\n # percent of np.nan in arrays\n [0, 0.1, 2., 50., 90.],\n ]\n\n def setup(self, array_size, percent_nans):\n np.random.seed(123)\n # produce a randomly shuffled array with the\n # approximate desired percentage np.nan content\n base_array = np.random.uniform(size=array_size)\n base_array[base_array < percent_nans / 100.] = np.nan\n self.arr = base_array\n\n def time_unique(self, array_size, percent_nans):\n np.unique(self.arr)\n" ]
[ [ "numpy.nanmedian", "numpy.random.seed", "numpy.nancumsum", "numpy.nansum", "numpy.nanvar", "numpy.nanargmax", "numpy.nanquantile", "numpy.nanmean", "numpy.nancumprod", "numpy.unique", "numpy.random.uniform", "numpy.nanargmin", "numpy.pad", "numpy.nanmax", "numpy.nanprod", "numpy.nanstd", "numpy.nanpercentile", "numpy.nanmin", "numpy.full" ] ]
hayatonakamura/feverDetector
[ "4d39a2551b45aa45954f633b4dd35e4f7444e7f8" ]
[ "board/new_hit_with_camera.py" ]
[ "# Hayato Nakamura\n# hn2357\n# Copyright 2020 Hayato Nakamura\nfrom __future__ import print_function #compatible with python 2.7\nimport sys, time\nimport numpy as np\nfrom picamera import PiCamera \nimport aws\nfrom collections import OrderedDict\nfrom decimal import *\nfrom datetime import datetime\n#import threading\ndynamodb = aws.getResource('dynamodb', 'us-east-1')\ns3 = aws.getClient('s3', 'us-east-1')\n\ndef search(mat):\n\t# Scan the matrix\n\tfor y in range(24-3):\n\t\tfor x in range(32-3):\n\t\t\twindow = mat[y:y+3, x:x+3]\n\t\t\tprint(window)\n\t\t\tprint(np.mean(window))\n\t\t\tif (np.mean(window) > 36 and np.mean(window) < 45):\n\t\t\t\tprint(\"\\n\\nHIT\\n\\n\")\n\t\t\t\treturn True\n\treturn False\n\n\ndef process_pic(name):\n\tcamera = PiCamera()\n\tcamera.capture('/home/pi/Desktop/mlx90640-library/' + name + '.jpg')\n\tcamera.close()\n\ttry:\n\t\tfile = name + '.jpg'\n\t\ts3.upload_file(file, 'hayatopia', file)\n\t\tprint(\"File uploaded on S3\")\n\texcept:\n\t\tprint(\"S3 failed...\")\n\n\ndef dynamo_add(name, arr, timestamp):\n\ttry:\n\t\ttable = dynamodb.Table(name)\n\texcept:\n\t\tprint(\"Table with name \", name, \"doesn't exist...\")\n\t\treturn\n\titems = OrderedDict()\n\titems['timestamp'] = timestamp\n\tfor x in range(len(arr)):\n\t\tval = '{:3.2f}'.format(arr[x])\n\t\tval = Decimal(val)\n\t\titems[str(x)] = val\n\ttry:\n\t\ttable.put_item(Item=items)\n\t\tprint(\"Data successfully uploaded...\")\n\texcept:\n\t\tprint(\"Data upload unsuccessful...\")\n\n# def t_add(name, ir):\n# \ttry:\n# \t\tprint('Starting Thread: ', threading.currentThread().getName())\n# \t\ttake_picture(name)\n# \t\tdynamo_add('temperature', ir)\n# \t\tprint ('Exiting Thread: ', threading.currentThread().getName())\n# \texcept:\n# \t\tprint(\"Error with threading...\")\n\n\n\ndef main():\n\tfifo = open('/var/run/mlx9062x.sock', 'r')\n\tfor z in range(20):\n\t\tfile = open('temperature.txt', 'w')\n\t\tmat = np.zeros((24, 32))\n\n\t\t# 20 frames\n\t\tir = np.frombuffer(fifo.read()[0:3072], dtype=np.float32)\n\t\tif (len(ir) == 0):\n\t\t\tbreak\n\n\t\ttemp = \"\"\n\t\tfor y in range(24):\n\t\t\tfor x in range(32):\n\t\t\t\tval = '{:3.2f}'.format(ir[32 * (23-y) + x])\n\t\t\t\ttemp += val + \" \"\n\n\t\t\t\tmat[y, x] = float(val)\n\t\t\tfile.write(temp)\n\t\t\tfile.write('\\n')\n\t\t\ttemp = \"\"\n\t\tfile.write('\\n')\n\t\tfile.write('\\n')\n\n\t\tfile.close()\n\t\tif (search(mat)):\n\t\t\tprint(\"here\")\n\t\t\tnow = str(datetime.now().strftime(\"%Y_%m_%d_%H_%M_%S\"))\n\t\t\tname = 'temp'\n\t\t\tprocess_pic(now)\n\t\t\tdynamo_add('temperature', ir, now)\n\t\t\t# t1 = threading.Thread(name='Upload to dynamo', target=t_add, args=(name, ir,))\n\t\t\t# t1.setDaemon(True)\n\t\t\t# t1.start()\n\n\t\ttime.sleep(0.1) #10fps\n\n\t\n\nif __name__ == \"__main__\":\n\t#print(\"Active threads: \", threading.active_count())\n\tmain()\n\n" ]
[ [ "numpy.mean", "numpy.zeros" ] ]
ichigo663/SimulatedAnnealing
[ "f319ee6bbdd8e0be16d39de0893a7a26d234a817" ]
[ "simulatedAnnealing.py" ]
[ "from pyglet.gl import *\nimport numpy as np\n\nclass SimulatedAnnealing:\n\n def __init__(self, function, tStart=15.0, tEnd=2.0,localMovement=False, dxMovement=1, scale=2.0):\n #xpos, ypos of current position\n self.scale = scale\n self.step = function.step\n self.xmin = function.minx\n self.xmax = function.maxx\n self.pos = [0, 0]\n self.pos[0] = np.random.randint(function.minx, function.maxx)\n self.pos[1] = function.compute(self.pos[0])\n self.evaluate = function.compute\n self.tmax = tStart\n self.t = tStart\n self.t_end = tEnd\n self.timer = 0.0\n self.p = 1.0\n self.local_movement = localMovement\n self.dx_interval = dxMovement\n self.run_time = tStart\n self.vline_len = 5.0\n self.y_max = np.max(function.y)\n self.y_min = np.min(function.y)\n\n def add_callback(self, f):\n self.callback = f\n\n def render(self):\n # render current solution\n glColor3f(0, 0, 0)\n glPointSize(10)\n glBegin(GL_POINTS)\n glVertex2f(self.pos[0], self.pos[1])\n glEnd()\n glBegin(GL_LINES)\n glVertex2f(self.pos[0], self.pos[1] - self.vline_len)\n glVertex2f(self.pos[0], self.pos[1] + self.vline_len)\n glEnd()\n\n def decreaseT(self, dec):\n self.t -= dec\n\n # return next random move\n # pick is made from [xmin, xmax]\n def next_value_all_random(self):\n x = (self.xmax - self.xmin) * np.random.ranf() + self.xmin\n return x, self.evaluate(x)\n\n # return next random move\n # pick is made from [x-dx, x+dx]\n def next_value_int(self, dx):\n x = dx*2 * np.random.ranf() + self.pos[0]-dx\n return x, self.evaluate(x)\n\n # returns a value in [0,1]\n def schedule(self):\n return self.t/self.tmax\n\n def run(self, dt):\n self.timer += dt\n # time in seconds\n if self.timer >= 0.1:\n self.decreaseT(self.timer)\n self.run_time -= self.timer\n self.timer = 0.0\n # update T probability\n self.p = self.schedule()\n # check termination\n if self.run_time < 0:\n self.callback()\n print(self)\n else:\n # pick next move\n if(self.local_movement):\n x, y = self.next_value_int(self.dx_interval)\n else:\n x, y = self.next_value_all_random()\n # delta of energy\n # and normalization in [0,1]\n # then we scale (optional), scaling\n # helps avoiding large P(x) for minor bad moves\n d_energy = np.abs((y - self.pos[1]) /(self.y_max - self.y_min))*self.scale\n # find the minimum\n if y < self.pos[1]:\n self.pos = [x, y]\n # accept with probability e^(-(delta_energy)/temperature))\n elif self.t > self.t_end and np.exp(-(d_energy) / self.p) >= np.random.ranf():\n self.pos = [x, y]\n\n def __repr__(self):\n return \"pos: [{x}, {y}]\\nstep: {step}\".format(x=self.pos[0], y=self.pos[1], step=self.step)\n\n" ]
[ [ "numpy.random.ranf", "numpy.abs", "numpy.exp", "numpy.max", "numpy.min", "numpy.random.randint" ] ]
chipmuenk/A2SRC
[ "156c063c825669130bdaf1f41a1e972bbc1747e3" ]
[ "A2SRC/plot_vispy_test8_mesh.py" ]
[ "import numpy as np\nfrom vispy import app, gloo, visuals\nfrom vispy.geometry import create_sphere\nfrom vispy.visuals.transforms import (STTransform, AffineTransform,\n ChainTransform)\n\n\nclass Canvas(app.Canvas):\n def __init__(self):\n app.Canvas.__init__(self, keys='interactive', size=(800, 550))\n\n self.meshes = []\n self.rotation = AffineTransform()\n\n # Generate some data to work with\n global mdata\n mdata = create_sphere(20, 40, 1.0)\n\n # Mesh with pre-indexed vertices, uniform color\n self.meshes.append(visuals.MeshVisual(meshdata=mdata, color='r'))\n\n ## Mesh with pre-indexed vertices, per-face color\n ## Because vertices are pre-indexed, we get a different color\n ## every time a vertex is visited, resulting in sharp color\n ## differences between edges.\n verts = mdata.get_vertices(indexed='faces')\n nf = verts.size//9\n fcolor = np.ones((nf, 3, 4), dtype=np.float32)\n fcolor[..., 0] = np.linspace(1, 0, nf)[:, np.newaxis]\n fcolor[..., 1] = np.random.normal(size=nf)[:, np.newaxis]\n fcolor[..., 2] = np.linspace(0, 1, nf)[:, np.newaxis]\n mesh = visuals.MeshVisual(vertices=verts, face_colors=fcolor)\n self.meshes.append(mesh)\n\n ## Mesh with unindexed vertices, per-vertex color\n ## Because vertices are unindexed, we get the same color\n ## every time a vertex is visited, resulting in no color differences\n ## between edges.\n verts = mdata.get_vertices()\n faces = mdata.get_faces()\n nv = verts.size//3\n vcolor = np.ones((nv, 4), dtype=np.float32)\n vcolor[:, 0] = np.linspace(1, 0, nv)\n vcolor[:, 1] = np.random.normal(size=nv)\n vcolor[:, 2] = np.linspace(0, 1, nv)\n self.meshes.append(visuals.MeshVisual(verts, faces, vcolor))\n self.meshes.append(visuals.MeshVisual(verts, faces, vcolor,\n shading='flat'))\n self.meshes.append(visuals.MeshVisual(verts, faces, vcolor,\n shading='smooth'))\n\n # Lay out meshes in a grid\n grid = (3, 3)\n s = 300. / max(grid)\n for i, mesh in enumerate(self.meshes):\n x = 800. * (i % grid[0]) / grid[0] + 400. / grid[0] - 2\n y = 800. * (i // grid[1]) / grid[1] + 400. / grid[1] + 2\n transform = ChainTransform([STTransform(translate=(x, y),\n scale=(s, s, 1)),\n self.rotation])\n tr_sys = visuals.transforms.TransformSystem(self)\n tr_sys.visual_to_document = transform\n mesh.tr_sys = tr_sys\n\n self.show()\n\n self.timer = app.Timer(connect=self.rotate)\n self.timer.start(0.016)\n\n def rotate(self, event):\n self.rotation.rotate(1, (0, 1, 0))\n self.update()\n\n def on_draw(self, ev):\n gloo.set_viewport(0, 0, *self.physical_size)\n gloo.clear(color='black', depth=True)\n for mesh in self.meshes:\n mesh.draw(mesh.tr_sys)\n\n\nif __name__ == '__main__':\n win = Canvas()\n import sys\n if sys.flags.interactive != 1:\n app.run()" ]
[ [ "numpy.random.normal", "numpy.ones", "numpy.linspace" ] ]
ttthomaschan/DeepcvLib
[ "18f7728559136a3c5c8ad54666788ea771e95b16", "18f7728559136a3c5c8ad54666788ea771e95b16" ]
[ "Detection/detect.py", "Detection/dataset/VOC_dataset.py" ]
[ "import cv2\r\nfrom model.fcos import FCOSDetector\r\nimport torch\r\nfrom torchvision import transforms\r\nimport numpy as np\r\nfrom dataset.VOC_dataset import VOCDataset\r\nimport time\r\nimport matplotlib.patches as patches\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.ticker import NullLocator\r\n\r\n\r\ndef preprocess_img(image, input_ksize):\r\n \"\"\"\r\n resize image and bboxes\r\n Returns\r\n image_paded: input_ksize\r\n bboxes: [None,4]\r\n \"\"\"\r\n min_side, max_side = input_ksize\r\n h, w, _ = image.shape\r\n\r\n smallest_side = min(w, h)\r\n largest_side = max(w, h)\r\n scale = min_side / smallest_side\r\n if largest_side * scale > max_side:\r\n scale = max_side / largest_side\r\n nw, nh = int(scale * w), int(scale * h)\r\n image_resized = cv2.resize(image, (nw, nh))\r\n\r\n pad_w = 32 - nw % 32\r\n pad_h = 32 - nh % 32\r\n\r\n image_paded = np.zeros(shape=[nh + pad_h, nw + pad_w, 3], dtype=np.uint8)\r\n image_paded[:nh, :nw, :] = image_resized\r\n return image_paded\r\n\r\n\r\ndef convertSyncBNtoBN(module):\r\n module_output = module\r\n if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):\r\n module_output = torch.nn.BatchNorm2d(module.num_features,\r\n module.eps, module.momentum,\r\n module.affine,\r\n module.track_running_stats)\r\n if module.affine:\r\n module_output.weight.data = module.weight.data.clone().detach()\r\n module_output.bias.data = module.bias.data.clone().detach()\r\n module_output.running_mean = module.running_mean\r\n module_output.running_var = module.running_var\r\n for name, child in module.named_children():\r\n module_output.add_module(name, convertSyncBNtoBN(child))\r\n del module\r\n return module_output\r\n\r\n\r\nif __name__ == \"__main__\":\r\n cmap = plt.get_cmap('tab20b')\r\n colors = [cmap(i) for i in np.linspace(0, 1, 20)]\r\n\r\n\r\n class Config():\r\n # backbone\r\n pretrained = False\r\n freeze_stage_1 = True\r\n freeze_bn = True\r\n\r\n # fpn\r\n fpn_out_channels = 256\r\n use_p5 = True\r\n\r\n # head\r\n class_num = 80\r\n use_GN_head = True\r\n prior = 0.01\r\n add_centerness = True\r\n cnt_on_reg = False\r\n\r\n # training\r\n strides = [8, 16, 32, 64, 128]\r\n limit_range = [[-1, 64], [64, 128], [128, 256], [256, 512], [512, 999999]]\r\n\r\n # inference\r\n score_threshold = 0.3\r\n nms_iou_threshold = 0.4\r\n max_detection_boxes_num = 300\r\n\r\n\r\n model = FCOSDetector(mode=\"inference\", config=Config)\r\n # model=torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\r\n # print(\"INFO===>success convert BN to SyncBN\")\r\n model = torch.nn.DataParallel(model)\r\n model.load_state_dict(torch.load(\"./checkpoint/voc_77.8.pth\", map_location=torch.device('cpu')))\r\n # model=convertSyncBNtoBN(model)\r\n # print(\"INFO===>success convert SyncBN to BN\")\r\n model = model.eval()\r\n model.to(\"cuda\")\r\n print(\"===>success loading model\")\r\n\r\n import os\r\n\r\n root = \"./test_images/\"\r\n names = os.listdir(root)\r\n for name in names:\r\n img_bgr = cv2.imread(root + name)\r\n img_pad = preprocess_img(img_bgr, [800, 1333])\r\n img = cv2.cvtColor(img_pad.copy(), cv2.COLOR_BGR2RGB)\r\n img1 = transforms.ToTensor()(img)\r\n img1 = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], inplace=True)(img1)\r\n img1 = img1.to(\"cuda\")\r\n\r\n start_t = time.time()\r\n with torch.no_grad():\r\n out = model(img1.unsqueeze_(dim=0))\r\n end_t = time.time()\r\n cost_t = 1000 * (end_t - start_t)\r\n print(\"===>success processing img, cost time %.2f ms\" % cost_t)\r\n # print(out)\r\n scores, classes, boxes = out\r\n\r\n boxes = boxes[0].cpu().numpy().tolist()\r\n classes = classes[0].cpu().numpy().tolist()\r\n scores = scores[0].cpu().numpy().tolist()\r\n plt.figure()\r\n fig, ax = plt.subplots(1)\r\n ax.imshow(img)\r\n for i, box in enumerate(boxes):\r\n pt1 = (int(box[0]), int(box[1]))\r\n pt2 = (int(box[2]), int(box[3]))\r\n img_pad = cv2.rectangle(img_pad, pt1, pt2, (0, 255, 0))\r\n b_color = colors[int(classes[i]) - 1]\r\n bbox = patches.Rectangle((box[0], box[1]), width=box[2] - box[0], height=box[3] - box[1], linewidth=1,\r\n facecolor='none', edgecolor=b_color)\r\n ax.add_patch(bbox)\r\n plt.text(box[0], box[1], s=\"%s %.3f\" % (VOCDataset.CLASSES_NAME[int(classes[i])], scores[i]), color='white',\r\n verticalalignment='top',\r\n bbox={'color': b_color, 'pad': 0})\r\n plt.axis('off')\r\n plt.gca().xaxis.set_major_locator(NullLocator())\r\n plt.gca().yaxis.set_major_locator(NullLocator())\r\n plt.savefig('out_images/{}'.format(name), bbox_inches='tight', pad_inches=0.0)\r\n plt.close()\r\n", "import torch\r\nimport xml.etree.ElementTree as ET\r\nimport os\r\nimport cv2\r\nimport numpy as np\r\nfrom torchvision import transforms\r\nfrom PIL import Image\r\nimport random\r\n\r\n\r\ndef flip(img, boxes):\r\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\r\n w = img.width\r\n if boxes.shape[0] != 0:\r\n xmin = w - boxes[:, 2]\r\n xmax = w - boxes[:, 0]\r\n boxes[:, 2] = xmax\r\n boxes[:, 0] = xmin\r\n return img, boxes\r\n\r\n\r\nclass VOCDataset(torch.utils.data.Dataset):\r\n CLASSES_NAME = (\r\n \"__background__ \",\r\n \"aeroplane\",\r\n \"bicycle\",\r\n \"bird\",\r\n \"boat\",\r\n \"bottle\",\r\n \"bus\",\r\n \"car\",\r\n \"cat\",\r\n \"chair\",\r\n \"cow\",\r\n \"diningtable\",\r\n \"dog\",\r\n \"horse\",\r\n \"motorbike\",\r\n \"person\",\r\n \"pottedplant\",\r\n \"sheep\",\r\n \"sofa\",\r\n \"train\",\r\n \"tvmonitor\",\r\n )\r\n\r\n def __init__(self, root_dir, resize_size=[800, 1333], split='trainval', use_difficult=False, is_train=True,\r\n augment=None):\r\n self.root = root_dir\r\n self.use_difficult = use_difficult\r\n self.imgset = split\r\n\r\n self._annopath = os.path.join(self.root, \"Annotations\", \"%s.xml\")\r\n self._imgpath = os.path.join(self.root, \"JPEGImages\", \"%s.jpg\")\r\n self._imgsetpath = os.path.join(self.root, \"ImageSets\", \"Main\", \"%s.txt\")\r\n\r\n with open(self._imgsetpath % self.imgset) as f:\r\n self.img_ids = f.readlines()\r\n self.img_ids = [x.strip() for x in self.img_ids]\r\n self.name2id = dict(zip(VOCDataset.CLASSES_NAME, range(len(VOCDataset.CLASSES_NAME))))\r\n self.id2name = {v: k for k, v in self.name2id.items()}\r\n self.resize_size = resize_size\r\n self.mean = [0.485, 0.456, 0.406]\r\n self.std = [0.229, 0.224, 0.225]\r\n self.train = is_train\r\n self.augment = augment\r\n print(\"INFO=====>voc dataset init finished ! !\")\r\n\r\n def __len__(self):\r\n return len(self.img_ids)\r\n\r\n def __getitem__(self, index):\r\n\r\n img_id = self.img_ids[index]\r\n img = Image.open(self._imgpath % img_id)\r\n\r\n anno = ET.parse(self._annopath % img_id).getroot()\r\n\r\n boxes = []\r\n classes = []\r\n for obj in anno.iter(\"object\"):\r\n difficult = int(obj.find(\"difficult\").text) == 1\r\n if not self.use_difficult and difficult:\r\n continue\r\n _box = obj.find(\"bndbox\")\r\n # Make pixel indexes 0-based\r\n # Refer to \"https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/pascal_voc.py#L208-L211\"\r\n box = [\r\n _box.find(\"xmin\").text,\r\n _box.find(\"ymin\").text,\r\n _box.find(\"xmax\").text,\r\n _box.find(\"ymax\").text,\r\n ]\r\n TO_REMOVE = 1\r\n box = tuple(\r\n map(lambda x: x - TO_REMOVE, list(map(float, box)))\r\n )\r\n boxes.append(box)\r\n\r\n name = obj.find(\"name\").text.lower().strip()\r\n classes.append(self.name2id[name])\r\n\r\n boxes = np.array(boxes, dtype=np.float32)\r\n if self.train:\r\n if random.random() < 0.5:\r\n img, boxes = flip(img, boxes)\r\n if self.augment is not None:\r\n img, boxes = self.augment(img, boxes)\r\n\r\n img = np.array(img)\r\n img, boxes = self.preprocess_img_boxes(img, boxes, self.resize_size)\r\n\r\n img = transforms.ToTensor()(img)\r\n boxes = torch.from_numpy(boxes)\r\n classes = torch.LongTensor(classes)\r\n\r\n return img, boxes, classes\r\n\r\n def preprocess_img_boxes(self, image, boxes, input_ksize):\r\n \"\"\"\r\n resize image and bboxes\r\n Returns\r\n image_paded: input_ksize\r\n bboxes: [None,4]\r\n \"\"\"\r\n min_side, max_side = input_ksize\r\n h, w, _ = image.shape\r\n\r\n smallest_side = min(w, h)\r\n largest_side = max(w, h)\r\n scale = min_side / smallest_side\r\n if largest_side * scale > max_side:\r\n scale = max_side / largest_side\r\n nw, nh = int(scale * w), int(scale * h)\r\n image_resized = cv2.resize(image, (nw, nh))\r\n\r\n pad_w = 32 - nw % 32\r\n pad_h = 32 - nh % 32\r\n\r\n image_paded = np.zeros(shape=[nh + pad_h, nw + pad_w, 3], dtype=np.uint8)\r\n image_paded[:nh, :nw, :] = image_resized\r\n\r\n if boxes is None:\r\n return image_paded\r\n else:\r\n boxes[:, [0, 2]] = boxes[:, [0, 2]] * scale\r\n boxes[:, [1, 3]] = boxes[:, [1, 3]] * scale\r\n return image_paded, boxes\r\n\r\n def collate_fn(self, data):\r\n imgs_list, boxes_list, classes_list = zip(*data)\r\n assert len(imgs_list) == len(boxes_list) == len(classes_list)\r\n batch_size = len(boxes_list)\r\n pad_imgs_list = []\r\n pad_boxes_list = []\r\n pad_classes_list = []\r\n\r\n h_list = [int(s.shape[1]) for s in imgs_list]\r\n w_list = [int(s.shape[2]) for s in imgs_list]\r\n max_h = np.array(h_list).max()\r\n max_w = np.array(w_list).max()\r\n for i in range(batch_size):\r\n img = imgs_list[i]\r\n pad_imgs_list.append(transforms.Normalize(self.mean, self.std, inplace=True)(\r\n torch.nn.functional.pad(img, (0, int(max_w - img.shape[2]), 0, int(max_h - img.shape[1])), value=0.)))\r\n\r\n max_num = 0\r\n for i in range(batch_size):\r\n n = boxes_list[i].shape[0]\r\n if n > max_num: max_num = n\r\n for i in range(batch_size):\r\n pad_boxes_list.append(\r\n torch.nn.functional.pad(boxes_list[i], (0, 0, 0, max_num - boxes_list[i].shape[0]), value=-1))\r\n pad_classes_list.append(\r\n torch.nn.functional.pad(classes_list[i], (0, max_num - classes_list[i].shape[0]), value=-1))\r\n\r\n batch_boxes = torch.stack(pad_boxes_list)\r\n batch_classes = torch.stack(pad_classes_list)\r\n batch_imgs = torch.stack(pad_imgs_list)\r\n\r\n return batch_imgs, batch_boxes, batch_classes\r\n\r\n\r\nif __name__ == \"__main__\":\r\n pass\r\n eval_dataset = VOCDataset(root_dir='../VOCdevkit/VOC2012', resize_size=[800, 1333],\r\n split='trainval_demoData', use_difficult=False, is_train=False, augment=None)\r\n print(len(eval_dataset.CLASSES_NAME))\r\n # dataset=VOCDataset(\"/home/data/voc2007_2012/VOCdevkit/VOC2012\",split='trainval')\r\n # for i in range(100):\r\n # img,boxes,classes=dataset[i]\r\n # img,boxes,classes=img.numpy().astype(np.uint8),boxes.numpy(),classes.numpy()\r\n # img=np.transpose(img,(1,2,0))\r\n # print(img.shape)\r\n # print(boxes)\r\n # print(classes)\r\n # for box in boxes:\r\n # pt1=(int(box[0]),int(box[1]))\r\n # pt2=(int(box[2]),int(box[3]))\r\n # img=cv2.rectangle(img,pt1,pt2,[0,255,0],3)\r\n # cv2.imshow(\"test\",img)\r\n # if cv2.waitKey(0)==27:\r\n # break\r\n # imgs,boxes,classes=eval_dataset.collate_fn([dataset[105],dataset[101],dataset[200]])\r\n # print(boxes,classes,\"\\n\",imgs.shape,boxes.shape,classes.shape,boxes.dtype,classes.dtype,imgs.dtype)\r\n # for index,i in enumerate(imgs):\r\n # i=i.numpy().astype(np.uint8)\r\n # i=np.transpose(i,(1,2,0))\r\n # i=cv2.cvtColor(i,cv2.COLOR_RGB2BGR)\r\n # print(i.shape,type(i))\r\n # cv2.imwrite(str(index)+\".jpg\",i)\r\n" ]
[ [ "torch.nn.BatchNorm2d", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.axis", "torch.no_grad", "matplotlib.pyplot.gca", "matplotlib.pyplot.subplots", "torch.device", "matplotlib.patches.Rectangle", "matplotlib.pyplot.get_cmap", "matplotlib.pyplot.close", "torch.nn.DataParallel", "numpy.linspace", "matplotlib.ticker.NullLocator" ], [ "torch.stack", "numpy.zeros", "torch.nn.functional.pad", "torch.from_numpy", "numpy.array", "torch.LongTensor" ] ]
Ynakatsuka/nishika-22
[ "72994cab16486b3a26686642ad72a29b6761b46d" ]
[ "src/benchmark/app.py" ]
[ "import warnings\n\nimport numpy as np\nimport pandas as pd\nimport streamlit as st\nfrom benchmark import (\n EXPERIMENT_NAMES,\n get_result,\n load_augmentation,\n load_config,\n load_lightning_module,\n normalize,\n)\nfrom kvt.initialization import initialize as kvt_initialize\nfrom kvt.utils import QueryExpansion\nfrom PIL import Image\n\n\[email protected](allow_output_mutation=True)\ndef load():\n # config\n config = load_config()\n\n # variables\n sample_submission_path = config.competition.sample_submission_path\n save_dir = config.save_dir\n\n # load reference\n sub = pd.read_csv(sample_submission_path)\n cite = pd.read_csv(config.competition.cite_path)\n embedding_paths = [\n f\"{save_dir}/predictions/test/{name}/test_fold_0.npy\"\n for name in EXPERIMENT_NAMES\n ]\n embeddings = np.concatenate(\n [normalize(np.load(path)) for path in embedding_paths], axis=1\n ).astype(\"float32\")\n embeddings = normalize(embeddings)\n n_query = len(sub)\n reference_embeddings = embeddings[n_query:]\n reference_ids = cite[\"gid\"].values\n\n # load models\n models, transforms, preprocessors = [], [], []\n for name in EXPERIMENT_NAMES:\n overrides = name.split(\",\")\n config = load_config(name, overrides=overrides)\n models.append(load_lightning_module(config))\n transforms.append(load_augmentation(config))\n preprocessors.append(lambda x: x)\n\n qe = QueryExpansion(\n alpha=1,\n k=50,\n similarity_threshold=0.7,\n normalize_similarity=True,\n strategy_to_deal_original=\"add\",\n n_query_update_iter=1,\n n_reference_update_iter=0,\n batch_size=10,\n )\n _, reference_embeddings = qe(reference_embeddings[:1], reference_embeddings)\n index = qe.create_index(reference_embeddings)\n\n return (\n config,\n preprocessors,\n transforms,\n models,\n qe,\n index,\n reference_embeddings,\n reference_ids,\n )\n\n\ndef main(\n config,\n preprocessors,\n transforms,\n models,\n qe,\n index,\n reference_embeddings,\n reference_ids,\n):\n # draw the page\n st.title(\"Similar Trade Mark Image Search\")\n\n k = 20\n n_cols, n_rows = 5, 4\n assert n_cols * n_rows == k\n\n # search\n uploaded_file = st.sidebar.file_uploader(\"Upload Image File\", type=\"jpg\")\n if uploaded_file is not None:\n image = Image.open(uploaded_file)\n st.sidebar.image(image, caption=\"Query Image\", use_column_width=True)\n\n D, I, _embeddings = get_result(\n np.array(image),\n preprocessors,\n transforms,\n models,\n qe,\n index,\n reference_embeddings,\n reference_ids,\n k=k,\n )\n assert len(D) == 1\n\n # draw image\n st.header(\"Found Images:\")\n col = st.columns(n_cols)\n for i, (sim, ref_id) in enumerate(zip(D[0], I[0])):\n if (i > 0) and (i % n_cols == 0):\n col = st.columns(n_cols)\n\n with col[i % n_cols]:\n path = f\"{config.input_dir}/cite_images/{ref_id}/{ref_id}.jpg\"\n image = Image.open(path)\n st.image(\n image,\n caption=f\"#{i+1}: Similarity: {sim:.3f}\",\n use_column_width=True,\n )\n\n\nif __name__ == \"__main__\":\n warnings.filterwarnings(\"ignore\")\n kvt_initialize()\n (\n config,\n preprocessors,\n transforms,\n models,\n qe,\n index,\n reference_embeddings,\n reference_ids,\n ) = load()\n main(\n config,\n preprocessors,\n transforms,\n models,\n qe,\n index,\n reference_embeddings,\n reference_ids,\n )\n" ]
[ [ "pandas.read_csv", "numpy.load", "numpy.array" ] ]
mmolnar0/sgillen_research
[ "752e09fdf7a996c832e71b0a8296322fe77e9ae3" ]
[ "torch_lstm_mod/lstm.py" ]
[ "import math\nimport torch as th\nimport torch.nn as nn\n\n\n\nclass LSTMCell(nn.Module):\n\n def __init__(self, input_size, hidden_size, bias=True):\n super(LSTMCell, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.bias = bias\n self.i2h = nn.Linear(input_size, 4 * hidden_size, bias=bias)\n self.h2h = nn.Linear(hidden_size, 4 * hidden_size, bias=bias)\n self.reset_parameters()\n\n def reset_parameters(self):\n std = 1.0 / math.sqrt(self.hidden_size)\n for w in self.parameters():\n w.data.uniform_(-std, std)\n\n def forward(self, x, hidden):\n\n if hidden is None:\n hidden = self._init_hidden(x)\n\n h, c = hidden\n h = h.view(h.size(1), -1)\n c = c.view(c.size(1), -1)\n x = x.view(x.size(1), -1)\n\n # Linear mappings\n preact = self.i2h(x) + self.h2h(h)\n\n # activations\n gates = preact[:, :3 * self.hidden_size].sigmoid()\n g_t = preact[:, 3 * self.hidden_size:].tanh()\n i_t = gates[:, :self.hidden_size]\n f_t = gates[:, self.hidden_size:2 * self.hidden_size]\n o_t = gates[:, -self.hidden_size:]\n\n c_t = th.mul(c, f_t) + th.mul(i_t, g_t)\n\n h_t = th.mul(o_t, c_t.tanh())\n\n h_t = h_t.view(1, h_t.size(0), -1)\n c_t = c_t.view(1, c_t.size(0), -1)\n return h_t, (h_t, c_t)\n\n @staticmethod\n def _init_hidden(input_):\n #h = th.zeros_like(input_.view(1, input_.size(1), -1))\n #c = th.zeros(1, input_.size(1), self.hidden_size))\n\n #return h, c\n return\n\n\nclass LSTM(nn.Module):\n\n def __init__(self, input_size, hidden_size, bias=True):\n super().__init__()\n self.lstm_cell = LSTMCell(input_size, hidden_size, bias)\n\n def forward(self, input_, hidden=None):\n # input_ is of dimensionalty (1, time, input_size, ...)\n\n outputs = []\n for x in th.unbind(input_, dim=1):\n hidden = self.lstm_cell(x, hidden)\n outputs.append(hidden[0].clone())\n\n return th.stack(outputs, dim=1)\n\n\n\nif __name__ == \"__main__\":\n\n lstm_cell = LSTMCell(input_size = 4, hidden_size = 12, bias=False)\n\n x = th.randn(4,1)\n h = th.randn(12,1)\n c = th.randn(12,1)\n\n yc, (hc,cc) = lstm_cell.forward(x,(h,c))\n\n print(\"yc shape: \" , yc.shape)\n print(\"hc shape: \" , hc.shape)\n print(\"cc shape: \" , cc.shape)\n\n\n lstm = LSTM(input_size = 4, hidden_size = 12, bias=False)\n\n x = th.randn(4,100,1)\n h = th.randn(12,1)\n\n y = lstm.forward(x, hidden = (h,h))\n\n print(\"y shape: \", y.shape)\n\n" ]
[ [ "torch.unbind", "torch.stack", "torch.nn.Linear", "torch.randn", "torch.mul" ] ]
jxhuang0508/CVRN
[ "ecbd1bebd43dadfd29536a8f31a65b920346fda6" ]
[ "cvrn/dataset/datasets_crst.py" ]
[ "import os\nimport os.path as osp\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport collections\nimport torch\nimport torchvision.transforms as transforms\nimport torchvision\nimport cv2\nfrom torch.utils import data\nimport sys\nfrom PIL import Image\n\npalette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30,\n 220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70,\n 0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32]\nzero_pad = 256 * 3 - len(palette)\nfor i in range(zero_pad):\n palette.append(0)\n\n\ndef colorize_mask(mask):\n # mask: numpy array of the mask\n new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P')\n new_mask.putpalette(palette)\n\n return new_mask\n\nclass GTA5TestDataSet(data.Dataset):\n def __init__(self, root, list_path, max_iters=None, test_size=(1024, 512), test_scale=1.0, mean=(128, 128, 128),\n std=(1, 1, 1), scale=True, mirror=True, ignore_label=255):\n self.root = root\n self.list_path = list_path\n self.test_h, self.test_w = test_size\n self.scale = scale\n self.test_scale = test_scale\n self.ignore_label = ignore_label\n self.mean = mean\n self.std = std\n self.is_mirror = mirror\n # self.mean_bgr = np.array([104.00698793, 116.66876762, 122.67891434])\n self.img_ids = []\n self.label_ids = []\n with open(list_path) as f:\n for item in f.readlines():\n fields = item.strip().split('\\t')\n self.img_ids.append(fields[0])\n self.label_ids.append(fields[1])\n if not max_iters == None:\n self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))\n self.label_ids = self.label_ids * int(np.ceil(float(max_iters) / len(self.label_ids)))\n self.files = []\n\n for idx in range(len(self.img_ids)):\n img_name = self.img_ids[idx]\n label_name = self.label_ids[idx]\n img_file = osp.join(self.root, img_name)\n label_file = osp.join(self.root, label_name)\n self.files.append({\n \"img\": img_file,\n \"label\": label_file,\n \"img_name\": img_name,\n \"label_name\": label_name\n })\n\n def __len__(self):\n return len(self.files)\n\n def __getitem__(self, index):\n datafiles = self.files[index]\n image = cv2.imread(datafiles[\"img\"], cv2.IMREAD_COLOR) # OpenCV read image as BGR, not RGB\n label = cv2.imread(datafiles[\"label\"], cv2.IMREAD_GRAYSCALE)\n\n img_name = datafiles[\"img_name\"]\n image = cv2.resize(image, None, fx=self.test_scale, fy=self.test_scale, interpolation=cv2.INTER_CUBIC)\n\n image = np.asarray(image, np.float32)\n label = np.asarray(label, np.float32)\n\n image -= self.mean # BGR\n image = image / self.std # np.reshape(self.std,(1,1,3))\n size = image.shape\n image = image.transpose((2, 0, 1))\n\n return image.copy(), label.copy(), np.array(size), img_name\n" ]
[ [ "numpy.array", "numpy.asarray" ] ]
yasirabd/deployment-notebook-prescriptive
[ "f3e07ee8472be9f2d8c78cfea2990131bdcbe881" ]
[ "utils/transform.py" ]
[ "import pandas as pd\nimport numpy as np\nimport time\nfrom datetime import timedelta, date, datetime\n\nclass TransformData(object):\n def __init__(self):\n pass\n\n # get data and preprocessing\n def format_timestamp(self, utc_datetime):\n now_timestamp = time.time()\n offset = datetime.fromtimestamp(now_timestamp) - datetime.utcfromtimestamp(now_timestamp)\n return utc_datetime + offset\n\n def reduce_columns(self, df, sensor):\n idx_cols_selected = [i for i in range(df.shape[1]) if i==0 or i%6==0]\n idx_col_timestamp = [1]\n idx = idx_col_timestamp + idx_cols_selected\n\n df = df[df.columns[idx]]\n df.columns = ['date'] + sensor\n\n # format col timestamp\n result = df.copy()\n result['date'] = pd.to_datetime(df['date']).dt.strftime('%Y-%m-%d %H:%M:%S')\n result['date'] = pd.to_datetime(result['date']).apply(self.format_timestamp)\n return result.iloc[0]\n\n def transform(self, data):\n date = data['date'].strftime(\"%Y-%m-%d %H:%M:%S\")\n sensors = data.index.tolist()[1:]\n actuals = []\n for d in data.tolist()[1:]:\n if type(d) == np.int or type(d) == np.int_ or type(d) == np.float64 or type(d) == np.float:\n actuals.append(np.around(d, 6))\n else:\n actuals.append(np.nan)\n return {'date': date, 'sensors': sensors, 'actuals':actuals}" ]
[ [ "numpy.around", "pandas.to_datetime" ] ]
CoAxLab/binding_manuscript
[ "fc6c3dabc81b505edb5a79a1835c6f29c494f941" ]
[ "imaging_code/fMRI_task.py" ]
[ "#!/usr/bin/env python2\r\n# -*- coding: utf-8 -*-\r\nfrom __future__ import division # so that 1/3=0.333 instead of 1/3=0\r\nfrom psychopy import visual, core, data, event, logging, gui\r\nfrom psychopy.constants import * # things like STARTED, FINISHED\r\nimport pandas as pd\r\nimport numpy as np # whole numpy lib is available, prepend 'np.'\r\nfrom numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray\r\nfrom numpy.random import random, randint, normal, shuffle\r\nimport os # handy system and path functions\r\nimport statsmodels.formula.api as sm\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport scipy.io\r\n\r\n# Ensure that relative paths start from the same directory as this script\r\n_thisDir = os.path.dirname(os.path.abspath(__file__))\r\nos.chdir(_thisDir)\r\n\r\n# Store info about the experiment session\r\nexpName = u'r2d4_MM' # from the Builder filename that created this script\r\nexpInfo = {'participant':u'', 'session':u''}\r\ndlg = gui.DlgFromDict(dictionary=expInfo, title=expName)\r\nif dlg.OK == False: core.quit() # user pressed cancel\r\nexpInfo['date'] = data.getDateStr() # add a simple timestamp\r\nexpInfo['expName'] = expName\r\n\r\n# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc\r\nfilename = _thisDir + os.sep + 'data/%s_%s_%s' %(expInfo['participant'], expName, expInfo['date'])\r\n\r\nout_all_fn = _thisDir + os.sep + 'data/%s_%s_%s_responses.csv' %(expInfo['participant'], expName, expInfo['session'])\r\ndata_out = pd.DataFrame(columns=('onsetTime','correctResp','keysPressed'))\r\n\r\n\r\n# An ExperimentHandler isn't essential but helps with data saving\r\nthisExp = data.ExperimentHandler(name=expName, version='',\r\n extraInfo=expInfo, runtimeInfo=None,\r\n originPath=None,\r\n savePickle=True, saveWideText=True,\r\n dataFileName=filename)\r\n#save a log file for detail verbose info\r\nlogFile = logging.LogFile(filename+'.log', level=logging.EXP)\r\nlogging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file\r\n\r\nendExpNow = False # flag for 'escape' or other condition => quit the exp\r\n\r\n# Start Code - component code to be run before the window creation\r\n\r\n# Setup the Window\r\nwin = visual.Window(size=(500, 500), fullscr=True, screen=0, allowGUI=False, allowStencil=False,\r\n monitor='testMonitor', color=[-1,-1,-1], colorSpace='rgb',\r\n blendMode='avg', useFBO=True,\r\n )\r\n# store frame rate of monitor if we can measure it successfully\r\nexpInfo['frameRate']=win.getActualFrameRate()\r\nif expInfo['frameRate']!=None:\r\n frameDur = 1.0/round(expInfo['frameRate'])\r\nelse:\r\n frameDur = 1.0/60.0 # couldn't get a reliable measure so guess\r\n\r\n# Initialize components for Routine \"Instructions\"\r\nInstructionsClock = core.Clock()\r\ntext_2 = visual.TextStim(win=win, ori=0, name='text_2',\r\n text=u'The experiment is about to begin. ', font=u'Arial',\r\n pos=[0, 0], height=0.1, wrapWidth=None,\r\n color=u'white', colorSpace='rgb', opacity=1,\r\n depth=0.0)\r\n\r\n# Initialize components for Routine \"trial\"\r\ntrialClock = core.Clock()\r\nISI = core.StaticPeriod(win=win, screenHz=expInfo['frameRate'], name='ISI')\r\nimage = visual.ImageStim(win=win, name='image',units='pix',\r\n image='sin', mask=None,\r\n ori=0, pos=[0, 0], size=[200,200],\r\n color=[1,1,1], colorSpace='rgb', opacity=1,\r\n flipHoriz=False, flipVert=False,\r\n texRes=128, interpolate=True, depth=0.0)\r\n\r\nfixation = visual.ShapeStim(win,\r\n vertices=((0, -0.075), (0, 0.075), (0,0), (-0.05,0), (0.05, 0)),\r\n lineWidth=3,\r\n closeShape=False,\r\n lineColor='white')\r\n\r\n\r\n\r\nWrong_1 = visual.Circle(win=win, units = 'pix', radius = 100,lineColor='red', fillColor = 'red')\r\n\r\n\r\n# Initialize components for Routine \"End\"\r\nEndClock = core.Clock()\r\ntext = visual.TextStim(win=win, ori=0, name='text',\r\n text=u'Experiment is completed. Thank you for your participation.', font=u'Arial',\r\n pos=[0, 0], height=0.1, wrapWidth=None,\r\n color=u'white', colorSpace='rgb', opacity=1,\r\n depth=0.0)\r\n\r\n\r\n#######################\r\n#### Set up onsets ####\r\n#######################\r\ncorr_thresh = 0.1\r\ndfStims = pd.DataFrame\r\nsequence_img_ids = []\r\nimg_dict = {2: 'image_folder/stim_2.png', 3: 'image_folder/stim_3.png', 4: 'image_folder/stim_4.png', 5: 'image_folder/stim_5.png'}\r\nkey_dict = {2:'2', 3:'3', 4:'4', 5:'5'}\r\n\r\nisDone = 0\r\nwhile not isDone:\r\n trial_types = np.asarray([2, 3, 4, 5])\r\n trial_IDs = np.asarray(range(4))\r\n trial_freq = np.asarray([12, 12, 12, 12])\r\n iti_range = np.asarray([2, 2, 2, 2, 3, 3, 3, 4, 5, 6, 7, 8])\r\n\r\n n_post = 3\r\n t_vec = []\r\n iti_vec = []\r\n tid_vec = []\r\n\r\n for tt in range(0,len(trial_types)):\r\n t_vec = np.repeat(trial_types,12)\r\n iti_vec = np.tile(iti_range,4)\r\n\r\n np.random.shuffle(t_vec)\r\n np.random.shuffle(iti_vec)\r\n vec = [0]\r\n id_vec = vec\r\n\r\n for t in range(0, len(t_vec)):\r\n vec = vec + [t_vec[t]] + np.repeat(0,iti_vec[t]).tolist()\r\n vec = vec + [0,0,0]\r\n dfStims = pd.DataFrame()\r\n X = np.zeros((len(vec),len(trial_types)))\r\n ons = np.zeros((12,4))\r\n for c in trial_types:\r\n a = np.where(vec==c)[0]\r\n ons[:,c-2] = a*2\r\n for indx in range(0, len(a)):\r\n name = a[indx]\r\n X[a[indx]][c-2]= 1\r\n\r\n df=pd.DataFrame(X)\r\n cxy = df.corr()\r\n cxy = abs(np.tril(cxy, k=-1))\r\n if cxy.max() < corr_thresh:\r\n isDone = 1\r\n\r\nfor x in range(0,len(vec)):\r\n if vec[x] == 0:\r\n sequence_img_ids.append('image_folder/skip.png')\r\n elif vec[x] != 0:\r\n sequence_img_ids.append(img_dict[vec[x]])\r\n\r\nid_vec = vec\r\nt_vec = range(0,480,2)\r\ndfStims['trial_img'] = sequence_img_ids\r\ndfStims['trial_ans'] = vec\r\n\r\n\r\n#######################\r\n## End Set up onsets ##\r\n#######################\r\n\r\nfilename = _thisDir + os.sep + 'data/%s_%s_%s_onsets.csv' %(expInfo['participant'], expName, expInfo['session'])\r\nnp.savetxt(filename, ons, '%5.2f',delimiter=\",\")\r\ndfStims.to_csv('MM_onsets.csv', index= False)\r\n\r\n\r\n#######################\r\n## Save as mat file for SPM\r\n#######################\r\n\r\n#\r\n# new_onsets = np.empty((4,), dtype=object)\r\n# df = pd.read_csv('0273_r2d4_MM_Run1_onsets.csv',header=None)\r\n# new_onsets[0] = np.array(df[0][:,np.newaxis])/2\r\n# new_onsets[1] = np.array(df[1][:,np.newaxis])/2\r\n# new_onsets[2] = np.array(df[2][:,np.newaxis])/2\r\n# new_onsets[3] = np.array(df[3][:,np.newaxis])/2\r\n# data={}\r\n# data['ons'] = new_onsets\r\n# scipy.io.savemat('0273_r2d4_MM_Run1_onsets.mat', data)\r\n#\r\n\r\n# Create some handy timers\r\nglobalClock = core.Clock() # to track the time since experiment started\r\nroutineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine\r\n\r\n#------Prepare to start Routine \"Instructions\"-------\r\nt = 0\r\nInstructionsClock.reset() # clock\r\nframeN = -1\r\nroutineTimer.add(5.000000)\r\n# update component parameters for each repeat\r\n# keep track of which components have finished\r\nInstructionsComponents = []\r\nInstructionsComponents.append(text_2)\r\nfor thisComponent in InstructionsComponents:\r\n if hasattr(thisComponent, 'status'):\r\n thisComponent.status = NOT_STARTED\r\n\r\n\r\n#-------Start Routine \"Instructions\"-------\r\ncontinueRoutine = True\r\nwhile continueRoutine and routineTimer.getTime() > 0:\r\n # get current time\r\n t = InstructionsClock.getTime()\r\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\r\n # update/draw components on each frame\r\n\r\n # *text_2* updates\r\n if t >= 0.0 and text_2.status == NOT_STARTED:\r\n # keep track of start time/frame for later\r\n text_2.tStart = t # underestimates by a little under one frame\r\n text_2.frameNStart = frameN # exact frame index\r\n text_2.setAutoDraw(True)\r\n if text_2.status == STARTED and t >= (0.0 + (5-win.monitorFramePeriod*0.75)): #most of one frame period left\r\n text_2.setAutoDraw(False)\r\n\r\n # check if all components have finished\r\n if not continueRoutine: # a component has requested a forced-end of Routine\r\n break\r\n continueRoutine = False # will revert to True if at least one component still running\r\n for thisComponent in InstructionsComponents:\r\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\r\n continueRoutine = True\r\n break # at least one component has not yet finished\r\n\r\n # check for quit (the Esc key)\r\n if endExpNow or event.getKeys(keyList=[\"escape\"]):\r\n core.quit()\r\n\r\n # refresh the screen\r\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\r\n win.flip()\r\n\r\n#-------Ending Routine \"Instructions\"-------\r\nfor thisComponent in InstructionsComponents:\r\n if hasattr(thisComponent, \"setAutoDraw\"):\r\n thisComponent.setAutoDraw(False)\r\n\r\n# set up handler to look after randomisation of conditions etc\r\ntrials = data.TrialHandler(nReps=1, method='sequential',\r\n extraInfo=expInfo, originPath=None,\r\n trialList=data.importConditions(u'MM_onsets.csv'),\r\n seed=None, name='trials')\r\n\r\n\r\nthisExp.addLoop(trials) # add the loop to the experiment\r\nthisTrial = trials.trialList[0] # so we can initialise stimuli with some values\r\n# abbreviate parameter names if possible (e.g. rgb=thisTrial.rgb)\r\nif thisTrial != None:\r\n for paramName in thisTrial.keys():\r\n exec(paramName + '= thisTrial.' + paramName)\r\nRTclock = core.Clock()\r\nmax_rt = 1\r\n\r\n##### Wait for scanner trigger key #####\r\nevent.clearEvents(eventType='keyboard')\r\n\r\nScannerKey = event.waitKeys([\"^\",\"escape\"])\r\nif endExpNow or \"escape\" in ScannerKey:\r\n core.quit()\r\nglobalClock.reset()\r\n\r\n\r\n\r\ntrial = -1\r\nfor thisTrial in trials:\r\n trial = trial+1\r\n\r\n currentLoop = trials\r\n # abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)\r\n if thisTrial != None:\r\n for paramName in thisTrial.keys():\r\n exec(paramName + '= thisTrial.' + paramName)\r\n\r\n fixation.setAutoDraw(True)\r\n win.flip()\r\n\r\n\r\n\r\n #------Prepare to start Routine \"trial\"-------\r\n\r\n frameN = -1\r\n routineTimer.add(2.000000)\r\n\r\n #For Debugging\r\n #print globalClock.getTime()\r\n #print t_vec[trial]\r\n # update component parameters for each repeat\r\n while globalClock.getTime() < t_vec[trial]:\r\n core.wait(.001)\r\n if endExpNow or event.getKeys(keyList=[\"escape\"]):\r\n core.quit()\r\n\r\n\r\n if trial_img != 'image_folder/skip.png':\r\n fixation.setAutoDraw(False)\r\n win.flip()\r\n image.setImage(trial_img)\r\n key_response = event.BuilderKeyResponse() # create an object of type KeyResponse\r\n key_response.status = NOT_STARTED\r\n # keep track of which components have finished\r\n trialComponents = []\r\n trialComponents.append(image)\r\n trialComponents.append(key_response)\r\n\r\n for thisComponent in trialComponents:\r\n if hasattr(thisComponent, 'status'):\r\n thisComponent.status = NOT_STARTED\r\n\r\n #-------Start Routine \"trial\"-------\r\n continueRoutine = True\r\n trialClock.reset() # clock\r\n # Print routTimer to verify matches correct onset timings.\r\n # print routineTimer.getTime()\r\n\r\n while continueRoutine:\r\n # get current me\r\n t = trialClock.getTime()\r\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\r\n # update/draw components on each frame\r\n\r\n # *image* updates\r\n if t >= 0.0 and image.status == NOT_STARTED:\r\n # keep track of start time/frame for later\r\n image.tStart = t # underestimates by a little under one frame\r\n image.frameNStart = frameN # exact frame index\r\n image.setAutoDraw(True)\r\n onsetTime = globalClock.getTime()\r\n if image.status == STARTED and t >= (0.0 + (1.0-win.monitorFramePeriod*0.75)): #most of one frame period left\r\n image.setAutoDraw(False)\r\n continueRoutine = False\r\n # *key_response* updates\r\n if t >= 0.0 and key_response.status == NOT_STARTED:\r\n # keep track of start time/frame for later\r\n key_response.tStart = t # underestimates by a little under one frame\r\n key_response.frameNStart = frameN # exact frame index\r\n key_response.status = STARTED\r\n # keyboard checking is just starting\r\n key_response.clock.reset() # now t=0\r\n event.clearEvents(eventType='keyboard')\r\n if key_response.status == STARTED and t >= (0.0 + (1-win.monitorFramePeriod*0.75)): #most of one frame period left\r\n key_response.status = STOPPED\r\n continueRoutine = False\r\n if key_response.status == STARTED:\r\n theseKeys = event.getKeys(keyList=['2', '3', '4', '5'])\r\n\r\n # check for quit:\r\n if \"escape\" in theseKeys:\r\n endExpNow = True\r\n if len(theseKeys) > 0: # at least one key was pressed\r\n key_response.keys.extend(theseKeys) # storing all keys\r\n key_response.rt.append(key_response.clock.getTime())\r\n # check if all components have finished\r\n if not continueRoutine: # a component has requested a forced-end of Routine\r\n break\r\n\r\n for thisComponent in trialComponents:\r\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\r\n continueRoutine = True\r\n break # at least one component has not yet finished\r\n\r\n # check for quit (the Esc key)\r\n if endExpNow or event.getKeys(keyList=[\"escape\"]):\r\n core.quit()\r\n\r\n # refresh the screen\r\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\r\n win.flip()\r\n #-------Ending Routine \"trial\"-------\r\n for thisComponent in trialComponents:\r\n if hasattr(thisComponent, \"setAutoDraw\"):\r\n thisComponent.setAutoDraw(False)\r\n # check responses\r\n if key_response.keys in ['', [], None]: # No response was made\r\n key_response.keys=None\r\n # was no response the correct answer?!\r\n if str(trial_ans).lower() == 'none': key_response.corr = 1 # correct non-response\r\n else: key_response.corr = 0 # failed to respond (incorrectly)\r\n # store data for trials (TrialHandler)\r\n trials.addData('key_response.keys',key_response.keys)\r\n trials.addData('key_response.corr', key_response.corr)\r\n if key_response.keys != None: # we had a response\r\n trials.addData('key_response.rt', key_response.rt)\r\n thisExp.nextEntry()\r\n win.flip()\r\n #Save Data to output File\r\n\r\n\r\n data_out.loc[len(data_out)+1]=[onsetTime,trial_ans, str(key_response.keys).strip('[]')]\r\n data_out.to_csv(out_all_fn, index=False)\r\n\r\n elif trial_img == 'image_folder/skip.png':\r\n fixation.setAutoDraw(True)\r\n core.wait(0.5)\r\n thisExp.nextEntry()\r\n\r\n\r\n# completed all trials\r\n\r\n\r\n#------Prepare to start Routine \"End\"-------\r\nt = 0\r\nEndClock.reset() # clock\r\nframeN = -1\r\nroutineTimer.add(1.000000)\r\n# update component parameters for each repeat\r\n# keep track of which components have finished\r\nEndComponents = []\r\nEndComponents.append(text)\r\nfor thisComponent in EndComponents:\r\n if hasattr(thisComponent, 'status'):\r\n thisComponent.status = NOT_STARTED\r\n\r\n#-------Start Routine \"End\"-------\r\ncontinueRoutine = True\r\nwhile continueRoutine and routineTimer.getTime() > 0:\r\n # get current time\r\n t = EndClock.getTime()\r\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\r\n # update/draw components on each frame\r\n\r\n # *text* updates\r\n if t >= 0.0 and text.status == NOT_STARTED:\r\n # keep track of start time/frame for later\r\n text.tStart = t # underestimates by a little under one frame\r\n text.frameNStart = frameN # exact frame index\r\n text.setAutoDraw(True)\r\n if text.status == STARTED and t >= (0.0 + (1.0-win.monitorFramePeriod*0.75)): #most of one frame period left\r\n text.setAutoDraw(False)\r\n\r\n # check if all components have finished\r\n if not continueRoutine: # a component has requested a forced-end of Routine\r\n break\r\n continueRoutine = False # will revert to True if at least one component still running\r\n for thisComponent in EndComponents:\r\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\r\n continueRoutine = True\r\n break # at least one component has not yet finished\r\n\r\n # check for quit (the Esc key)\r\n if endExpNow or event.getKeys(keyList=[\"escape\"]):\r\n core.quit()\r\n\r\n # refresh the screen\r\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\r\n win.flip()\r\n\r\n#-------Ending Routine \"End\"-------\r\nfor thisComponent in EndComponents:\r\n if hasattr(thisComponent, \"setAutoDraw\"):\r\n thisComponent.setAutoDraw(False)\r\nwin.close()\r\ncore.quit()\r\n" ]
[ [ "numpy.tile", "numpy.random.shuffle", "numpy.zeros", "numpy.savetxt", "pandas.DataFrame", "numpy.asarray", "numpy.repeat", "numpy.where", "numpy.tril" ] ]
NunoEdgarGFlowHub/io
[ "242a3be6c375e4f7cf130766c0098cfe4b0fc8d2" ]
[ "tensorflow_io/kafka/__init__.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Kafka Dataset.\n\n@@KafkaDataset\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow_io.kafka.python.ops.kafka_dataset_ops import KafkaDataset\n\nfrom tensorflow.python.util.all_util import remove_undocumented\n\n_allowed_symbols = [\n \"KafkaDataset\",\n]\n\nremove_undocumented(__name__)\n" ]
[ [ "tensorflow.python.util.all_util.remove_undocumented" ] ]
jbbrokaw/matplotlib
[ "86ec1b6fc5628bfb2d09797c58d7eed0ca8c2427", "86ec1b6fc5628bfb2d09797c58d7eed0ca8c2427" ]
[ "lib/matplotlib/backends/backend_cairo.py", "examples/mplot3d/trisurf3d_demo.py" ]
[ "\"\"\"\nA Cairo backend for matplotlib\nAuthor: Steve Chaplin\n\nCairo is a vector graphics library with cross-device output support.\nFeatures of Cairo:\n * anti-aliasing\n * alpha channel\n * saves image files as PNG, PostScript, PDF\n\nhttp://cairographics.org\nRequires (in order, all available from Cairo website):\n cairo, pycairo\n\nNaming Conventions\n * classes MixedUpperCase\n * varables lowerUpper\n * functions underscore_separated\n\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport six\n\nimport os, sys, warnings, gzip\n\nimport numpy as np\n\ndef _fn_name(): return sys._getframe(1).f_code.co_name\n\ntry:\n import cairocffi as cairo\nexcept ImportError:\n try:\n import cairo\n except ImportError:\n raise ImportError(\"Cairo backend requires that cairocffi or pycairo is installed.\")\n else:\n HAS_CAIRO_CFFI = False\nelse:\n HAS_CAIRO_CFFI = True\n\n_version_required = (1,2,0)\nif cairo.version_info < _version_required:\n raise ImportError (\"Pycairo %d.%d.%d is installed\\n\"\n \"Pycairo %d.%d.%d or later is required\"\n % (cairo.version_info + _version_required))\nbackend_version = cairo.version\ndel _version_required\n\nfrom matplotlib.backend_bases import RendererBase, GraphicsContextBase,\\\n FigureManagerBase, FigureCanvasBase\nfrom matplotlib.cbook import is_string_like\nfrom matplotlib.figure import Figure\nfrom matplotlib.mathtext import MathTextParser\nfrom matplotlib.path import Path\nfrom matplotlib.transforms import Bbox, Affine2D\nfrom matplotlib.font_manager import ttfFontProperty\n\n_debug = False\n#_debug = True\n\n# Image::color_conv(format) for draw_image()\nif sys.byteorder == 'little':\n BYTE_FORMAT = 0 # BGRA\nelse:\n BYTE_FORMAT = 1 # ARGB\n\n\nclass RendererCairo(RendererBase):\n fontweights = {\n 100 : cairo.FONT_WEIGHT_NORMAL,\n 200 : cairo.FONT_WEIGHT_NORMAL,\n 300 : cairo.FONT_WEIGHT_NORMAL,\n 400 : cairo.FONT_WEIGHT_NORMAL,\n 500 : cairo.FONT_WEIGHT_NORMAL,\n 600 : cairo.FONT_WEIGHT_BOLD,\n 700 : cairo.FONT_WEIGHT_BOLD,\n 800 : cairo.FONT_WEIGHT_BOLD,\n 900 : cairo.FONT_WEIGHT_BOLD,\n 'ultralight' : cairo.FONT_WEIGHT_NORMAL,\n 'light' : cairo.FONT_WEIGHT_NORMAL,\n 'normal' : cairo.FONT_WEIGHT_NORMAL,\n 'medium' : cairo.FONT_WEIGHT_NORMAL,\n 'semibold' : cairo.FONT_WEIGHT_BOLD,\n 'bold' : cairo.FONT_WEIGHT_BOLD,\n 'heavy' : cairo.FONT_WEIGHT_BOLD,\n 'ultrabold' : cairo.FONT_WEIGHT_BOLD,\n 'black' : cairo.FONT_WEIGHT_BOLD,\n }\n fontangles = {\n 'italic' : cairo.FONT_SLANT_ITALIC,\n 'normal' : cairo.FONT_SLANT_NORMAL,\n 'oblique' : cairo.FONT_SLANT_OBLIQUE,\n }\n\n\n def __init__(self, dpi):\n \"\"\"\n \"\"\"\n if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))\n self.dpi = dpi\n self.gc = GraphicsContextCairo (renderer=self)\n self.text_ctx = cairo.Context (\n cairo.ImageSurface (cairo.FORMAT_ARGB32,1,1))\n self.mathtext_parser = MathTextParser('Cairo')\n\n RendererBase.__init__(self)\n\n def set_ctx_from_surface (self, surface):\n self.gc.ctx = cairo.Context (surface)\n\n\n def set_width_height(self, width, height):\n self.width = width\n self.height = height\n self.matrix_flipy = cairo.Matrix (yy=-1, y0=self.height)\n # use matrix_flipy for ALL rendering?\n # - problem with text? - will need to switch matrix_flipy off, or do a\n # font transform?\n\n\n def _fill_and_stroke (self, ctx, fill_c, alpha, alpha_overrides):\n if fill_c is not None:\n ctx.save()\n if len(fill_c) == 3 or alpha_overrides:\n ctx.set_source_rgba (fill_c[0], fill_c[1], fill_c[2], alpha)\n else:\n ctx.set_source_rgba (fill_c[0], fill_c[1], fill_c[2], fill_c[3])\n ctx.fill_preserve()\n ctx.restore()\n ctx.stroke()\n\n @staticmethod\n def convert_path(ctx, path, transform):\n for points, code in path.iter_segments(transform):\n if code == Path.MOVETO:\n ctx.move_to(*points)\n elif code == Path.CLOSEPOLY:\n ctx.close_path()\n elif code == Path.LINETO:\n ctx.line_to(*points)\n elif code == Path.CURVE3:\n ctx.curve_to(points[0], points[1],\n points[0], points[1],\n points[2], points[3])\n elif code == Path.CURVE4:\n ctx.curve_to(*points)\n\n\n def draw_path(self, gc, path, transform, rgbFace=None):\n ctx = gc.ctx\n\n transform = transform + \\\n Affine2D().scale(1.0, -1.0).translate(0, self.height)\n\n ctx.new_path()\n self.convert_path(ctx, path, transform)\n\n self._fill_and_stroke(ctx, rgbFace, gc.get_alpha(), gc.get_forced_alpha())\n\n def draw_image(self, gc, x, y, im):\n # bbox - not currently used\n if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))\n\n rows, cols, buf = im.color_conv (BYTE_FORMAT)\n surface = cairo.ImageSurface.create_for_data (\n buf, cairo.FORMAT_ARGB32, cols, rows, cols*4)\n ctx = gc.ctx\n y = self.height - y - rows\n\n ctx.save()\n ctx.set_source_surface (surface, x, y)\n if gc.get_alpha() != 1.0:\n ctx.paint_with_alpha(gc.get_alpha())\n else:\n ctx.paint()\n ctx.restore()\n\n def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):\n # Note: x,y are device/display coords, not user-coords, unlike other\n # draw_* methods\n if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))\n\n if ismath:\n self._draw_mathtext(gc, x, y, s, prop, angle)\n\n else:\n ctx = gc.ctx\n ctx.new_path()\n ctx.move_to (x, y)\n ctx.select_font_face (prop.get_name(),\n self.fontangles [prop.get_style()],\n self.fontweights[prop.get_weight()])\n\n size = prop.get_size_in_points() * self.dpi / 72.0\n\n ctx.save()\n if angle:\n ctx.rotate (-angle * np.pi / 180)\n ctx.set_font_size (size)\n\n if HAS_CAIRO_CFFI:\n if not isinstance(s, six.text_type):\n s = six.text_type(s)\n else:\n if not six.PY3 and isinstance(s, six.text_type):\n s = s.encode(\"utf-8\")\n\n ctx.show_text(s)\n ctx.restore()\n\n def _draw_mathtext(self, gc, x, y, s, prop, angle):\n if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))\n\n ctx = gc.ctx\n width, height, descent, glyphs, rects = self.mathtext_parser.parse(\n s, self.dpi, prop)\n\n ctx.save()\n ctx.translate(x, y)\n if angle:\n ctx.rotate (-angle * np.pi / 180)\n\n for font, fontsize, s, ox, oy in glyphs:\n ctx.new_path()\n ctx.move_to(ox, oy)\n\n fontProp = ttfFontProperty(font)\n ctx.save()\n ctx.select_font_face (fontProp.name,\n self.fontangles [fontProp.style],\n self.fontweights[fontProp.weight])\n\n size = fontsize * self.dpi / 72.0\n ctx.set_font_size(size)\n if isinstance(s, six.text_type):\n s = s.encode(\"utf-8\")\n ctx.show_text(s)\n ctx.restore()\n\n for ox, oy, w, h in rects:\n ctx.new_path()\n ctx.rectangle (ox, oy, w, h)\n ctx.set_source_rgb (0, 0, 0)\n ctx.fill_preserve()\n\n ctx.restore()\n\n\n def flipy(self):\n if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))\n return True\n #return False # tried - all draw objects ok except text (and images?)\n # which comes out mirrored!\n\n\n def get_canvas_width_height(self):\n if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))\n return self.width, self.height\n\n\n def get_text_width_height_descent(self, s, prop, ismath):\n if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))\n if ismath:\n width, height, descent, fonts, used_characters = self.mathtext_parser.parse(\n s, self.dpi, prop)\n return width, height, descent\n\n ctx = self.text_ctx\n ctx.save()\n ctx.select_font_face (prop.get_name(),\n self.fontangles [prop.get_style()],\n self.fontweights[prop.get_weight()])\n\n # Cairo (says it) uses 1/96 inch user space units, ref: cairo_gstate.c\n # but if /96.0 is used the font is too small\n\n size = prop.get_size_in_points() * self.dpi / 72.0\n\n # problem - scale remembers last setting and font can become\n # enormous causing program to crash\n # save/restore prevents the problem\n ctx.set_font_size (size)\n\n y_bearing, w, h = ctx.text_extents (s)[1:4]\n ctx.restore()\n\n return w, h, h + y_bearing\n\n\n def new_gc(self):\n if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))\n self.gc.ctx.save()\n self.gc._alpha = 1.0\n self.gc._forced_alpha = False # if True, _alpha overrides A from RGBA\n return self.gc\n\n\n def points_to_pixels(self, points):\n if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))\n return points/72.0 * self.dpi\n\n\nclass GraphicsContextCairo(GraphicsContextBase):\n _joind = {\n 'bevel' : cairo.LINE_JOIN_BEVEL,\n 'miter' : cairo.LINE_JOIN_MITER,\n 'round' : cairo.LINE_JOIN_ROUND,\n }\n\n _capd = {\n 'butt' : cairo.LINE_CAP_BUTT,\n 'projecting' : cairo.LINE_CAP_SQUARE,\n 'round' : cairo.LINE_CAP_ROUND,\n }\n\n\n def __init__(self, renderer):\n GraphicsContextBase.__init__(self)\n self.renderer = renderer\n\n\n def restore(self):\n self.ctx.restore()\n\n\n def set_alpha(self, alpha):\n GraphicsContextBase.set_alpha(self, alpha)\n _alpha = self.get_alpha()\n rgb = self._rgb\n if self.get_forced_alpha():\n self.ctx.set_source_rgba (rgb[0], rgb[1], rgb[2], _alpha)\n else:\n self.ctx.set_source_rgba (rgb[0], rgb[1], rgb[2], rgb[3])\n\n\n #def set_antialiased(self, b):\n # enable/disable anti-aliasing is not (yet) supported by Cairo\n\n\n def set_capstyle(self, cs):\n if cs in ('butt', 'round', 'projecting'):\n self._capstyle = cs\n self.ctx.set_line_cap (self._capd[cs])\n else:\n raise ValueError('Unrecognized cap style. Found %s' % cs)\n\n\n def set_clip_rectangle(self, rectangle):\n if not rectangle: return\n x,y,w,h = rectangle.bounds\n # pixel-aligned clip-regions are faster\n x,y,w,h = round(x), round(y), round(w), round(h)\n ctx = self.ctx\n ctx.new_path()\n ctx.rectangle (x, self.renderer.height - h - y, w, h)\n ctx.clip ()\n\n def set_clip_path(self, path):\n if not path: return\n tpath, affine = path.get_transformed_path_and_affine()\n ctx = self.ctx\n ctx.new_path()\n affine = affine + Affine2D().scale(1.0, -1.0).translate(0.0, self.renderer.height)\n RendererCairo.convert_path(ctx, tpath, affine)\n ctx.clip()\n\n def set_dashes(self, offset, dashes):\n self._dashes = offset, dashes\n if dashes == None:\n self.ctx.set_dash([], 0) # switch dashes off\n else:\n self.ctx.set_dash(\n list(self.renderer.points_to_pixels(np.asarray(dashes))), offset)\n\n\n def set_foreground(self, fg, isRGBA=None):\n GraphicsContextBase.set_foreground(self, fg, isRGBA)\n if len(self._rgb) == 3:\n self.ctx.set_source_rgb(*self._rgb)\n else:\n self.ctx.set_source_rgba(*self._rgb)\n\n def set_graylevel(self, frac):\n GraphicsContextBase.set_graylevel(self, frac)\n if len(self._rgb) == 3:\n self.ctx.set_source_rgb(*self._rgb)\n else:\n self.ctx.set_source_rgba(*self._rgb)\n\n\n def set_joinstyle(self, js):\n if js in ('miter', 'round', 'bevel'):\n self._joinstyle = js\n self.ctx.set_line_join(self._joind[js])\n else:\n raise ValueError('Unrecognized join style. Found %s' % js)\n\n\n def set_linewidth(self, w):\n self._linewidth = w\n self.ctx.set_line_width (self.renderer.points_to_pixels(w))\n\n\ndef new_figure_manager(num, *args, **kwargs): # called by backends/__init__.py\n \"\"\"\n Create a new figure manager instance\n \"\"\"\n if _debug: print('%s()' % (_fn_name()))\n FigureClass = kwargs.pop('FigureClass', Figure)\n thisFig = FigureClass(*args, **kwargs)\n return new_figure_manager_given_figure(num, thisFig)\n\n\ndef new_figure_manager_given_figure(num, figure):\n \"\"\"\n Create a new figure manager instance for the given figure.\n \"\"\"\n canvas = FigureCanvasCairo(figure)\n manager = FigureManagerBase(canvas, num)\n return manager\n\n\nclass FigureCanvasCairo (FigureCanvasBase):\n def print_png(self, fobj, *args, **kwargs):\n width, height = self.get_width_height()\n\n renderer = RendererCairo (self.figure.dpi)\n renderer.set_width_height (width, height)\n surface = cairo.ImageSurface (cairo.FORMAT_ARGB32, width, height)\n renderer.set_ctx_from_surface (surface)\n\n self.figure.draw (renderer)\n surface.write_to_png (fobj)\n\n def print_pdf(self, fobj, *args, **kwargs):\n return self._save(fobj, 'pdf', *args, **kwargs)\n\n def print_ps(self, fobj, *args, **kwargs):\n return self._save(fobj, 'ps', *args, **kwargs)\n\n def print_svg(self, fobj, *args, **kwargs):\n return self._save(fobj, 'svg', *args, **kwargs)\n\n def print_svgz(self, fobj, *args, **kwargs):\n return self._save(fobj, 'svgz', *args, **kwargs)\n\n def _save (self, fo, format, **kwargs):\n # save PDF/PS/SVG\n orientation = kwargs.get('orientation', 'portrait')\n\n dpi = 72\n self.figure.dpi = dpi\n w_in, h_in = self.figure.get_size_inches()\n width_in_points, height_in_points = w_in * dpi, h_in * dpi\n\n if orientation == 'landscape':\n width_in_points, height_in_points = (height_in_points,\n width_in_points)\n\n if format == 'ps':\n if not hasattr(cairo, 'PSSurface'):\n raise RuntimeError ('cairo has not been compiled with PS '\n 'support enabled')\n surface = cairo.PSSurface (fo, width_in_points, height_in_points)\n elif format == 'pdf':\n if not hasattr(cairo, 'PDFSurface'):\n raise RuntimeError ('cairo has not been compiled with PDF '\n 'support enabled')\n surface = cairo.PDFSurface (fo, width_in_points, height_in_points)\n elif format in ('svg', 'svgz'):\n if not hasattr(cairo, 'SVGSurface'):\n raise RuntimeError ('cairo has not been compiled with SVG '\n 'support enabled')\n if format == 'svgz':\n filename = fo\n if is_string_like(fo):\n fo = open(fo, 'wb')\n close = True\n else:\n close = False\n try:\n fo = gzip.GzipFile(None, 'wb', fileobj=fo)\n finally:\n if close:\n fo.close()\n surface = cairo.SVGSurface (fo, width_in_points, height_in_points)\n else:\n warnings.warn (\"unknown format: %s\" % format)\n return\n\n # surface.set_dpi() can be used\n renderer = RendererCairo (self.figure.dpi)\n renderer.set_width_height (width_in_points, height_in_points)\n renderer.set_ctx_from_surface (surface)\n ctx = renderer.gc.ctx\n\n if orientation == 'landscape':\n ctx.rotate (np.pi/2)\n ctx.translate (0, -height_in_points)\n # cairo/src/cairo_ps_surface.c\n # '%%Orientation: Portrait' is always written to the file header\n # '%%Orientation: Landscape' would possibly cause problems\n # since some printers would rotate again ?\n # TODO:\n # add portrait/landscape checkbox to FileChooser\n\n self.figure.draw (renderer)\n\n show_fig_border = False # for testing figure orientation and scaling\n if show_fig_border:\n ctx.new_path()\n ctx.rectangle(0, 0, width_in_points, height_in_points)\n ctx.set_line_width(4.0)\n ctx.set_source_rgb(1,0,0)\n ctx.stroke()\n ctx.move_to(30,30)\n ctx.select_font_face ('sans-serif')\n ctx.set_font_size(20)\n ctx.show_text('Origin corner')\n\n ctx.show_page()\n surface.finish()\n\n\nFigureCanvas = FigureCanvasCairo\n", "from mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nn_angles = 36\nn_radii = 8\n\n# An array of radii\n# Does not include radius r=0, this is to eliminate duplicate points\nradii = np.linspace(0.125, 1.0, n_radii)\n\n# An array of angles\nangles = np.linspace(0, 2*np.pi, n_angles, endpoint=False)\n\n# Repeat all angles for each radius\nangles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)\n\n# Convert polar (radii, angles) coords to cartesian (x, y) coords\n# (0, 0) is added here. There are no duplicate points in the (x, y) plane\nx = np.append(0, (radii*np.cos(angles)).flatten())\ny = np.append(0, (radii*np.sin(angles)).flatten())\n\n# Pringle surface\nz = np.sin(-x*y)\n\nfig = plt.figure()\nax = fig.gca(projection='3d')\n\nax.plot_trisurf(x, y, z, cmap=cm.jet, linewidth=0.2)\n\nplt.show()\n" ]
[ [ "matplotlib.backend_bases.FigureManagerBase", "matplotlib.backend_bases.RendererBase.__init__", "matplotlib.cbook.is_string_like", "matplotlib.backend_bases.GraphicsContextBase.__init__", "matplotlib.backend_bases.GraphicsContextBase.set_foreground", "numpy.asarray", "matplotlib.backend_bases.GraphicsContextBase.set_alpha", "matplotlib.font_manager.ttfFontProperty", "matplotlib.mathtext.MathTextParser", "matplotlib.backend_bases.GraphicsContextBase.set_graylevel", "matplotlib.transforms.Affine2D" ], [ "matplotlib.pyplot.figure", "numpy.repeat", "numpy.cos", "matplotlib.pyplot.show", "numpy.sin", "numpy.linspace" ] ]
ejnnr/steerable_pdo_experiments
[ "17902e56641cefe305b935c8733b45aa066bf068" ]
[ "stl_experiments/experiments/plot_exps.py" ]
[ "\nimport pandas as pd\nimport argparse\nimport os\nimport matplotlib\n\nimport utils\n\nif \"DISPLAY\" not in os.environ:\n matplotlib.use('Agg')\n\nimport matplotlib.pyplot as plt\n\nSHOW_PLOT = False\nSAVE_PLOT = True\n\nRESHUFFLE = False\nAUGMENT_TRAIN = False\n\ncolors = {\n \"train\": \"blue\",\n \"valid\": \"green\",\n \"test\": \"red\"\n}\n\n\ndef plot_mean_with_variance(axis, data, label):\n mean = data.mean()\n std = data.std()\n axis.plot(mean, label=label, color=colors[label])\n axis.fill_between(\n mean.index,\n mean - std,\n mean + std,\n color=colors[label],\n alpha=0.1\n )\n\n\ndef plot(logs, plotpath=None, show=False, outfig=None):\n \n if isinstance(logs, str) and os.path.isfile(logs):\n logs = utils.retrieve_logs(logs)\n elif not isinstance(logs, pd.DataFrame):\n raise ValueError()\n \n if outfig is None:\n figure, (loss_axis, acc_axis) = plt.subplots(1, 2, figsize=(10, 4))\n else:\n figure, (loss_axis, acc_axis) = outfig\n\n train = logs[logs.split.str.startswith(\"train\")].groupby(\"iteration\")\n valid = logs[logs.split == \"valid\"].groupby(\"iteration\")\n test = logs[logs.split == \"test\"].groupby(\"iteration\")\n \n #################### Plot Loss trends ####################\n \n loss_axis.cla()\n \n plot_mean_with_variance(loss_axis, train.loss, \"train\")\n if len(valid) > 0:\n plot_mean_with_variance(loss_axis, valid.loss, \"valid\")\n if len(test) > 0:\n plot_mean_with_variance(loss_axis, test.loss, \"test\")\n \n loss_axis.legend()\n loss_axis.set_xlabel('iterations')\n loss_axis.set_ylabel('Loss')\n \n #################### Plot Accuracy trends ####################\n \n acc_axis.cla()\n \n plot_mean_with_variance(acc_axis, train.accuracy, \"train\")\n if len(valid) > 0:\n plot_mean_with_variance(acc_axis, valid.accuracy, \"valid\")\n if len(test) > 0:\n plot_mean_with_variance(acc_axis, test.accuracy, \"test\")\n \n ################## Test score ########################\n \n test = logs[logs.split == \"test\"]\n \n xmax = logs.iteration.max()\n \n if len(test) > 0:\n best_acc = test.accuracy.max()\n acc_axis.hlines(best_acc, xmin=0, xmax=xmax, linewidth=0.5, linestyles='--', label='Max Test Accuracy')\n acc_axis.set_yticks(list(acc_axis.get_yticks()) + [best_acc])\n \n if len(test) > 1:\n mean_acc = test.accuracy.mean()\n mean_std = test.accuracy.std()\n acc_axis.hlines(mean_acc, xmin=0, xmax=xmax, linewidth=0.5, color=colors[\"test\"], label='Mean Test Accuracy')\n acc_axis.fill_between([0, xmax], [mean_acc - mean_std] * 2, [mean_acc + mean_std] * 2, color=colors[\"test\"],\n alpha=0.1)\n acc_axis.set_yticks(list(acc_axis.get_yticks()) + [mean_acc])\n \n acc_axis.legend()\n acc_axis.set_xlabel('iterations')\n acc_axis.set_ylabel('Accuracy')\n \n figure.tight_layout()\n plt.draw()\n \n if plotpath is not None:\n figure.savefig(plotpath, format='svg', dpi=256, bbox_inches=\"tight\")\n \n if show:\n figure.show()\n plt.pause(0.01)\n\n\n################################################################################\n################################################################################\n\n\nif __name__ == \"__main__\":\n # Parse training configuration\n parser = argparse.ArgumentParser()\n \n # Dataset params\n parser.add_argument('--dataset', type=str, help='The name of the dataset to use')\n parser.add_argument('--augment', dest=\"augment\", action=\"store_true\",\n help='Augment the training set with rotated images')\n parser.set_defaults(augment=AUGMENT_TRAIN)\n\n parser.add_argument('--reshuffle', dest=\"reshuffle\", action=\"store_true\",\n help='Reshuffle train and valid splits instead of using the default split')\n parser.set_defaults(reshuffle=RESHUFFLE)\n \n # Model params\n parser.add_argument('--model', type=str, help='The name of the model to use')\n parser.add_argument('--N', type=int, help='Size of cyclic group for GCNN and maximum frequency for HNET')\n parser.add_argument('--flip', dest=\"flip\", action=\"store_true\",\n help='Use also reflection equivariance in the EXP model')\n parser.set_defaults(flip=False)\n parser.add_argument('--sgsize', type=int, default=None,\n help='Number of rotations in the subgroup to restrict to in the EXP e2sfcnn models')\n parser.add_argument('--fixparams', dest=\"fixparams\", action=\"store_true\",\n help='Keep the number of parameters of the model fixed by adjusting its topology')\n parser.set_defaults(fixparams=False)\n parser.add_argument('--F', type=float, default=0.8, help='Frequency cut-off: maximum frequency at radius \"r\" is \"F*r\"')\n parser.add_argument('--sigma', type=float, default=0.6, help='Width of the rings building the bases (std of the gaussian window)')\n parser.add_argument('--J', type=int, default=0, help='Number of additional frequencies in the interwiners of finite groups')\n parser.add_argument('--restrict', type=int, default=-1, help='Layer where to restrict SFCNN from E(2) to SE(2)')\n\n # plot configs\n parser.add_argument('--show', dest=\"show\", action=\"store_true\", help='Show the plots during execution')\n parser.set_defaults(show=SHOW_PLOT)\n\n parser.add_argument('--store_plot', dest=\"store_plot\", action=\"store_true\", help='Save the plots in a file or not')\n parser.set_defaults(store_plot=SAVE_PLOT)\n \n config = parser.parse_args()\n \n # Draw the plot\n logs_file = utils.logs_path(config)\n plotpath = utils.plot_path(config)\n plot(logs_file, plotpath, config.show)\n" ]
[ [ "matplotlib.use", "matplotlib.pyplot.pause", "matplotlib.pyplot.draw", "matplotlib.pyplot.subplots" ] ]
Abdiel-EMT/segnet
[ "474a68079000a85d1e62ad9723d316074bb1eb8d" ]
[ "segnet/models/multiresunet.py" ]
[ "from tensorflow import keras as K\n\n\ndef conv2d(x, filters, shape, padding=\"same\", strides=(1, 1), activation=\"relu\"):\n \"\"\"\n 2D Convolutional layers with Batch Normalization\n \n Arguments:\n x: Keras layer, the input to the feature map\n filters: Int representing the number of filters to use\n shape: Tuple with two integer values (number of rows, number of columns)\n padding: String that determines the padding mode\n strides: Tuple of two integer values that represent the strides\n activation: String that defines the activation function\n \n Returns:\n x: A Keras layer\n \"\"\"\n\n x = K.layers.Conv2D(\n filters, shape, strides=strides, padding=padding, use_bias=False\n )(x)\n x = K.layers.BatchNormalization(scale=False)(x)\n\n if activation is None:\n return x\n\n x = K.layers.Activation(activation)(x)\n\n return x\n\n\ndef MultiResBlock(u_val, input, alpha=1.67):\n \"\"\"\n MultiRes Block, as defined in the paper. Alpha is a parameter that controls\n the number of parameters in the block.\n \n Arguments:\n U: Integer value for the number of filters.\n input: A Keras layer.\n \n Returns:\n out: A Keras layer.\n \"\"\"\n # Calculate the value of W as defined in the paper.\n weight = u_val * alpha\n # The first 1x1 map, to preserve dimensions\n dimension_conservation = conv2d(\n input,\n int(weight * 0.167) + int(weight * 0.333) + int(weight * 0.5),\n (1, 1),\n activation=None,\n padding=\"same\",\n )\n # First 3x3 map, adjusted with W / 6\n conv3x3 = conv2d(\n input, int(weight * 0.167), (3, 3), activation=\"relu\", padding=\"same\"\n )\n # Second 3x3 map, adjusted with W / 3\n conv5x5 = conv2d(\n conv3x3, int(weight * 0.333), (3, 3), activation=\"relu\", padding=\"same\"\n )\n # Third 3x3 map, adjusted with W / 2\n conv7x7 = conv2d(\n conv5x5, int(weight * 0.5), (3, 3), activation=\"relu\", padding=\"same\"\n )\n # Concatenate all three 3x3 maps\n out = K.layers.Concatenate()([conv3x3, conv5x5, conv7x7])\n out = K.layers.BatchNormalization()(out)\n # And add the new 7x7 map with the 1x1 map, batch normalized\n out = K.layers.add([dimension_conservation, out])\n out = K.layers.Activation(\"relu\")(out)\n out = K.layers.BatchNormalization()(out)\n\n return out\n\n\ndef ResPath(filters, input, length=None):\n \"\"\"\n ResPath, to mitigate the semantic gap in the architecture.\n This function creates a path with just one combination of residual\n and feature maps, and this can easily be extended with the length\n argument.\n \n Arguments:\n filters: Integer value corresponding to the number of filters.\n length: Integer value with the length of the path, number of maps.\n input: Keras layer.\n \n Returns:\n out: Keras layer.\n \"\"\"\n # First residual connection\n residual = conv2d(input, filters, (1, 1), activation=None, padding=\"same\")\n # And first feature map\n out = conv2d(input, filters, (3, 3), activation=\"relu\", padding=\"same\")\n # Add the layers and batch normalize\n out = K.layers.add([residual, out])\n out = K.layers.Activation(\"relu\")(out)\n out = K.layers.BatchNormalization()(out)\n # If there is more maps to add, we add them with this loop\n if not length is None:\n for _ in range(length - 1):\n\n residual = out\n residual = conv2d(\n residual, filters, (1, 1), activation=None, padding=\"same\"\n )\n\n out = conv2d(out, filters, (3, 3), activation=\"relu\", padding=\"same\")\n\n out = K.layers.add([residual, out])\n out = K.layers.Activation(\"relu\")(out)\n out = K.layers.BatchNormalization()(out)\n\n return out\n\n\ndef MultiResUnet(input_size=(256, 256, 3)):\n \"\"\"\n A TensorFlow implementation of the MultiResUNet architecture as defined in the\n following paper:\n https://arxiv.org/abs/1902.04049\n \n This is a variant of the U-Net, with additional blocks and paths to help mitigate\n semantic gaps and to obtain better characteristics from the images and maps.\n \n Arguments:\n input_size: Tuple of three integers (height, width, number of channels) that\n describe the input images.\n \n Returns:\n model: A Keras model instance.\n \"\"\"\n\n inputs = K.layers.Input((input_size))\n\n mresblock_1 = MultiResBlock(32, inputs)\n pool_1 = K.layers.MaxPooling2D(pool_size=(2, 2))(mresblock_1)\n mresblock_1 = ResPath(32, mresblock_1, 4)\n\n mresblock_2 = MultiResBlock(64, pool_1)\n pool_2 = K.layers.MaxPooling2D(pool_size=(2, 2))(mresblock_2)\n mresblock_2 = ResPath(64, mresblock_2, 3)\n\n mresblock_3 = MultiResBlock(128, pool_2)\n pool_3 = K.layers.MaxPooling2D(pool_size=(2, 2))(mresblock_3)\n mresblock_3 = ResPath(128, mresblock_3, 2)\n\n mresblock_4 = MultiResBlock(256, pool_3)\n pool_4 = K.layers.MaxPooling2D(pool_size=(2, 2))(mresblock_4)\n mresblock_4 = ResPath(256, mresblock_4)\n\n mresblock5 = MultiResBlock(512, pool_4)\n\n up_6 = K.layers.Conv2DTranspose(256, (2, 2), strides=(2, 2), padding=\"same\")(\n mresblock5\n )\n up_6 = K.layers.Concatenate()([up_6, mresblock_4])\n mresblock_6 = MultiResBlock(256, up_6)\n\n up_7 = K.layers.Conv2DTranspose(128, (2, 2), strides=(2, 2), padding=\"same\")(\n mresblock_6\n )\n up_7 = K.layers.Concatenate()([up_7, mresblock_3])\n mresblock7 = MultiResBlock(128, up_7)\n\n up_8 = K.layers.Conv2DTranspose(64, (2, 2), strides=(2, 2), padding=\"same\")(\n mresblock7\n )\n up_8 = K.layers.Concatenate()([up_8, mresblock_2])\n mresblock8 = MultiResBlock(64, up_8)\n\n up_9 = K.layers.Conv2DTranspose(32, (2, 2), strides=(2, 2), padding=\"same\")(\n mresblock8\n )\n up_9 = K.layers.Concatenate()([up_9, mresblock_1])\n mresblock9 = MultiResBlock(32, up_9)\n\n conv_10 = conv2d(mresblock9, 1, (1, 1), activation=\"sigmoid\")\n\n model = K.models.Model(inputs=[inputs], outputs=[conv_10])\n\n return model\n\n" ]
[ [ "tensorflow.keras.layers.Concatenate", "tensorflow.keras.layers.Conv2DTranspose", "tensorflow.keras.layers.MaxPooling2D", "tensorflow.keras.layers.Activation", "tensorflow.keras.models.Model", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.add", "tensorflow.keras.layers.Input" ] ]
iankhr/armagarch
[ "5d292b54cde992cca47024aaeb8d4120f0665a7d" ]
[ "armagarch/tStudent.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jun 26 12:46:36 2020\r\nThis class defines t-Student distribution for ARMA-GARCH models\r\n\r\n@author: Ian Khrashchevskyi\r\n\"\"\"\r\n\r\nfrom .distModel import DistModel\r\nimport numpy as np\r\nimport scipy.stats as stats\r\nfrom scipy.special import gamma\r\n\r\nclass tStudent(DistModel):\r\n \"\"\"\r\n INPUT:\r\n data - innovations\r\n params -dict with mean and Var and other as a parameter nu\r\n \"\"\"\r\n def _giveName(self):\r\n if self._params is None:\r\n self._params = {'Mean':0,'Var':1, 'other':3}\r\n \r\n self._name = 'Student'\r\n self._startingValues = 3\r\n self._varnames = ['nu']\r\n \r\n \r\n def _setConstraints(self, data=None):\r\n self._constraints = {'Mean':[(-np.Inf, np.inf),], 'Var':[(0,np.inf),],\\\r\n 'other':[(3,np.Inf),]}\r\n \r\n \r\n def lls(self, data =None, params = None):\r\n if data is None:\r\n data = self._data\r\n \r\n if params is None:\r\n params = self._params\r\n \r\n mu = params['Mean']\r\n var = params['Var']\r\n nu = params['other']\r\n ells = np.log(gamma((nu+1)/2)/(np.sqrt(np.pi*(nu-2))*gamma(nu/2)))\\\r\n - 0.5*np.log(var.values) \\\r\n - (nu+1)/2*np.log(1+(data.values-mu)**2/(var.values*(nu-2))) \r\n return -ells\r\n\r\n\r\n def simulate(self, nobs= 1):\r\n \"\"\"\r\n Use built in simulator for now\r\n \"\"\"\r\n return stats.t.rvs(df = self._params['other'],\\\r\n loc = self._params['Mean'],\\\r\n scale = self._params['Var'],\\\r\n size = nobs)" ]
[ [ "numpy.sqrt", "numpy.log", "scipy.stats.t.rvs", "scipy.special.gamma" ] ]
ishine/sudo_rm_rf
[ "ec3fae1e2c9d85710f933a600f3ab93f92468dee" ]
[ "sudo_rm_rf/dnn/experiments/run_fuss_separation.py" ]
[ "\"\"\"!\n@brief Running an experiment with the improved version of SuDoRmRf on\nuniversal source separation with multiple sources.\n\n@author Efthymios Tzinis {[email protected]}\n@copyright University of Illinois at Urbana-Champaign\n\"\"\"\n\nimport os\nimport sys\ncurrent_dir = os.path.dirname(os.path.abspath('__file__'))\nroot_dir = os.path.abspath(os.path.join(current_dir, '../../../'))\nsys.path.append(root_dir)\n\nfrom __config__ import API_KEY\nfrom comet_ml import Experiment, OfflineExperiment\n\nimport torch\nfrom torch.nn import functional as F\nfrom tqdm import tqdm\nfrom pprint import pprint\nimport sudo_rm_rf.dnn.experiments.utils.improved_cmd_args_parser_v2 as parser\nimport sudo_rm_rf.dnn.experiments.utils.mixture_consistency \\\n as mixture_consistency\nimport sudo_rm_rf.dnn.experiments.utils.dataset_setup as dataset_setup\nimport sudo_rm_rf.dnn.losses.sisdr as sisdr_lib\nimport sudo_rm_rf.dnn.losses.snr as snr_lib\nimport sudo_rm_rf.dnn.losses.norm as norm_lib\nimport sudo_rm_rf.dnn.models.improved_sudormrf as improved_sudormrf\nimport sudo_rm_rf.dnn.models.groupcomm_sudormrf_v2 as sudormrf_gc_v2\nimport sudo_rm_rf.dnn.models.causal_improved_sudormrf_v3 as \\\n causal_improved_sudormrf\nimport sudo_rm_rf.dnn.models.sudormrf as initial_sudormrf\nimport sudo_rm_rf.dnn.utils.cometml_loss_report as cometml_report\nimport sudo_rm_rf.dnn.utils.cometml_log_audio as cometml_audio_logger\nimport sudo_rm_rf.dnn.utils.log_audio as offline_audio_logger\n\n# torch.backends.cudnn.enabled = False\nargs = parser.get_args()\nhparams = vars(args)\ngenerators = dataset_setup.setup(hparams)\n# Hardcode n_sources for all the experiments with musdb\nassert hparams['n_channels'] == 1, 'Mono source separation is available for now'\n\naudio_loggers = dict(\n [(n_src,\n cometml_audio_logger.AudioLogger(fs=hparams[\"fs\"],\n bs=1,\n n_sources=n_src))\n for n_src in range(1, hparams['max_num_sources'] + 1)])\n\n# offline_savedir = os.path.join('/home/thymios/offline_exps',\n# hparams[\"project_name\"],\n# '_'.join(hparams['cometml_tags']))\n# if not os.path.exists(offline_savedir):\n# os.makedirs(offline_savedir)\n# audio_logger = offline_audio_logger.AudioLogger(dirpath=offline_savedir,\n# fs=hparams[\"fs\"], bs=hparams[\"batch_size\"], n_sources=4)\n\n# Hardcode the test generator for each one of the number of sources\nfor n_src in range(hparams['min_num_sources'], hparams['max_num_sources']+1):\n for split_name in ['val', 'test']:\n loader = dataset_setup.create_loader_for_simple_dataset(\n dataset_name='FUSS',\n separation_task=hparams['separation_task'],\n data_split=split_name, sample_rate=hparams['fs'],\n n_channels=hparams['n_channels'], min_or_max=hparams['min_or_max'],\n zero_pad=hparams['zero_pad_audio'],\n timelegth=hparams['audio_timelength'],\n normalize_audio=hparams['normalize_audio'],\n n_samples=0, min_num_sources=n_src, max_num_sources=n_src)\n\n gen_name = '{}_{}_srcs'.format(split_name, n_src)\n generators[gen_name] = loader.get_generator(\n batch_size=hparams['batch_size'], num_workers=hparams['n_jobs'])\n\n# experiment = OfflineExperiment(API_KEY, offline_directory=offline_savedir)\nexperiment = Experiment(API_KEY, project_name=hparams['project_name'])\nexperiment.log_parameters(hparams)\nexperiment_name = '_'.join(hparams['cometml_tags'])\nfor tag in hparams['cometml_tags']:\n experiment.add_tag(tag)\nif hparams['experiment_name'] is not None:\n experiment.set_name(hparams['experiment_name'])\nelse:\n 
experiment.set_name(experiment_name)\n\nos.environ['CUDA_VISIBLE_DEVICES'] = ','.join(\n [cad for cad in hparams['cuda_available_devices']])\n\nback_loss_tr_loss_name, back_loss_tr_loss = (\n 'tr_back_loss_SNR',\n # norm_lib.L1(return_individual_results=False)\n # norm_lib.PermInvariantL1(n_sources=hparams[\"n_sources\"],\n # weighted_norm=True)\n # 'tr_back_loss_SISDRi',\n snr_lib.PermInvariantSNRwithZeroRefs(\n n_sources=hparams[\"max_num_sources\"],\n zero_mean=False,\n backward_loss=True,\n inactivity_threshold=-40.)\n)\n\nval_losses = {}\nall_losses = []\nfor val_set in [x for x in generators if not x == 'train']:\n if generators[val_set] is None:\n continue\n\n n_actual_sources = int(val_set.split('_')[1])\n if n_actual_sources == 1:\n single_source = False\n improvement = False\n metric_name = 'SISDR'\n n_estimated_sources = 1\n else:\n single_source = False\n improvement = True\n n_estimated_sources = hparams['max_num_sources']\n metric_name = 'SISDRi'\n val_losses[val_set] = {}\n all_losses.append(val_set + '_{}'.format(metric_name))\n val_losses[val_set][val_set + '_{}'.format(metric_name)] = \\\n sisdr_lib.StabilizedPermInvSISDRMetric(\n zero_mean=True,\n single_source=single_source,\n n_estimated_sources=n_estimated_sources,\n n_actual_sources=n_actual_sources,\n backward_loss=False,\n improvement=improvement,\n return_individual_results=True)\nall_losses.append(back_loss_tr_loss_name)\n\nif hparams['model_type'] == 'relu':\n model = improved_sudormrf.SuDORMRF(out_channels=hparams['out_channels'],\n in_channels=hparams['in_channels'],\n num_blocks=hparams['num_blocks'],\n upsampling_depth=hparams['upsampling_depth'],\n enc_kernel_size=hparams['enc_kernel_size'],\n enc_num_basis=hparams['enc_num_basis'],\n num_sources=hparams['max_num_sources'])\nelif hparams['model_type'] == 'causal':\n model = causal_improved_sudormrf.CausalSuDORMRF(\n in_audio_channels=1,\n out_channels=hparams['out_channels'],\n in_channels=hparams['in_channels'],\n num_blocks=hparams['num_blocks'],\n upsampling_depth=hparams['upsampling_depth'],\n enc_kernel_size=hparams['enc_kernel_size'],\n enc_num_basis=hparams['enc_num_basis'],\n num_sources=hparams['max_num_sources'])\nelif hparams['model_type'] == 'softmax':\n model = initial_sudormrf.SuDORMRF(out_channels=hparams['out_channels'],\n in_channels=hparams['in_channels'],\n num_blocks=hparams['num_blocks'],\n upsampling_depth=hparams['upsampling_depth'],\n enc_kernel_size=hparams['enc_kernel_size'],\n enc_num_basis=hparams['enc_num_basis'],\n num_sources=hparams['max_num_sources'])\nelif hparams['model_type'] == 'groupcomm_v2':\n model = sudormrf_gc_v2.GroupCommSudoRmRf(\n in_audio_channels=hparams['n_channels'],\n out_channels=hparams['out_channels'],\n in_channels=hparams['in_channels'],\n num_blocks=hparams['num_blocks'],\n upsampling_depth=hparams['upsampling_depth'],\n enc_kernel_size=hparams['enc_kernel_size'],\n enc_num_basis=hparams['enc_num_basis'],\n num_sources=hparams['max_num_sources'],\n group_size=16)\nelse:\n raise ValueError('Invalid model: {}.'.format(hparams['model_type']))\n\nnumparams = 0\nfor f in model.parameters():\n if f.requires_grad:\n numparams += f.numel()\nexperiment.log_parameter('Parameters', numparams)\nprint('Trainable Parameters: {}'.format(numparams))\n\nmodel = torch.nn.DataParallel(model).cuda()\nopt = torch.optim.Adam(model.parameters(), lr=hparams['learning_rate'])\n# lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n# optimizer=opt, mode='max', factor=1. 
/ hparams['divide_lr_by'],\n# patience=hparams['patience'], verbose=True)\n\n\ndef normalize_tensor_wav(wav_tensor, eps=1e-8, std=None):\n mean = wav_tensor.mean(-1, keepdim=True)\n if std is None:\n std = wav_tensor.std(-1, keepdim=True)\n return (wav_tensor - mean) / (std + eps)\n\n\ndef online_augment(clean_sources):\n # clean_sources: (batch, n_sources, time)\n # Online mixing over samples of the batch. (This might cause to get\n # mixtures from the same type of sound but it's highly improbable).\n # Keep the exact same SNR distribution with the initial mixtures.\n n_sources = clean_sources.shape[1]\n batch_size = clean_sources.shape[0]\n\n initial_biases = torch.mean(clean_sources, dim=-1, keepdim=True)\n initial_energies = torch.std(clean_sources, dim=-1, keepdim=True)\n\n augmented_wavs_l = []\n for i in range(n_sources):\n augmented_wavs_l.append(clean_sources[torch.randperm(batch_size), i])\n augmented_wavs = torch.stack(augmented_wavs_l, 1)\n # augmented_wavs = normalize_tensor_wav(augmented_wavs)\n # augmented_wavs = (augmented_wavs * initial_energies) + initial_biases\n augmented_wavs = augmented_wavs[:, torch.randperm(n_sources)]\n augmented_wavs *= (torch.rand(batch_size, n_sources).unsqueeze(-1) + 0.5)\n\n return augmented_wavs\n\n\ntr_step = 0\nval_step = 0\nprev_epoch_val_loss = 0.\nfor i in range(hparams['n_epochs']):\n res_dic = {}\n for loss_name in all_losses:\n res_dic[loss_name] = {'mean': 0., 'std': 0., 'median': 0., 'acc': []}\n print(\"FUSS Sudo-RM-RF: {} - {} || Epoch: {}/{}\".format(\n experiment.get_key(), experiment.get_tags(), i+1, hparams['n_epochs']))\n model.train()\n\n sum_loss = 0.\n train_tqdm_gen = tqdm(generators['train'], desc='Training')\n for cnt, data in enumerate(train_tqdm_gen):\n opt.zero_grad()\n # data shape: (batch, n_sources, time_samples)\n clean_wavs = online_augment(data)\n clean_wavs = clean_wavs.cuda()\n\n input_mixture = torch.sum(clean_wavs, -2, keepdim=True)\n # input_mixture = normalize_tensor_wav(input_mixture)\n\n input_mix_std = input_mixture.std(-1, keepdim=True)\n input_mix_mean = input_mixture.mean(-1, keepdim=True)\n input_mixture = (input_mixture - input_mix_mean) / (\n input_mix_std + 1e-9)\n\n # input_mix_std = input_mixture.std(-1, keepdim=True)\n # input_mix_mean = input_mixture.mean(-1, keepdim=True)\n # input_mixture = (input_mixture - input_mix_mean) / (input_mix_std + 1e-9)\n # clean_wavs = normalize_tensor_wav(clean_wavs, std=input_mix_std)\n\n rec_sources_wavs = model(input_mixture)\n # rec_sources_wavs = (rec_sources_wavs * input_mix_std) + input_mix_mean\n rec_sources_wavs = mixture_consistency.apply(rec_sources_wavs,\n input_mixture)\n\n # l = back_loss_tr_loss(normalize_tensor_wav(rec_sources_wavs),\n # normalize_tensor_wav(clean_wavs))\n l = back_loss_tr_loss(rec_sources_wavs,\n clean_wavs)\n l.backward()\n\n if hparams['clip_grad_norm'] > 0:\n torch.nn.utils.clip_grad_norm_(model.parameters(),\n hparams['clip_grad_norm'])\n\n opt.step()\n sum_loss += l.detach().item()\n train_tqdm_gen.set_description(\n \"Training, Running Avg Loss: {}\".format(sum_loss / (cnt + 1)))\n\n if hparams['patience'] > 0:\n if tr_step % hparams['patience'] == 0:\n new_lr = (hparams['learning_rate']\n / (hparams['divide_lr_by'] ** (tr_step // hparams['patience'])))\n print('Reducing Learning rate to: {}'.format(new_lr))\n for param_group in opt.param_groups:\n param_group['lr'] = new_lr\n tr_step += 1\n\n for val_set in [x for x in generators if not x == 'train']:\n if generators[val_set] is not None:\n n_actual_sources = 
int(val_set.split('_')[1])\n model.eval()\n n_songs_written = 10\n with torch.no_grad():\n for data in tqdm(generators[val_set],\n desc='Validation on {}'.format(val_set)):\n clean_wavs = data.cuda()\n input_mixture = torch.sum(clean_wavs, -2, keepdim=True)\n # input_mixture = normalize_tensor_wav(input_mixture)\n input_mix_std = input_mixture.std(-1, keepdim=True)\n input_mix_mean = input_mixture.mean(-1, keepdim=True)\n input_mixture = (input_mixture - input_mix_mean) / (\n input_mix_std + 1e-9)\n\n rec_sources_wavs = model(input_mixture)\n # rec_sources_wavs = (rec_sources_wavs * input_mix_std) + input_mix_mean\n rec_sources_wavs = mixture_consistency.apply(\n rec_sources_wavs,\n input_mixture)\n\n for loss_name, loss_func in val_losses[val_set].items():\n # l, best_perm = loss_func(\n # normalize_tensor_wav(rec_sources_wavs),\n # normalize_tensor_wav(clean_wavs),\n # return_best_permutation=True)\n l, best_perm = loss_func(\n rec_sources_wavs,\n clean_wavs,\n return_best_permutation=True)\n res_dic[loss_name]['acc'] += l.tolist()\n\n audio_loggers[n_actual_sources].log_batch(\n rec_sources_wavs[:, best_perm.long().cuda()][0, 0].unsqueeze(0),\n clean_wavs[0].unsqueeze(0),\n input_mixture[0].unsqueeze(0),\n experiment, step=val_step, tag=val_set)\n\n val_step += 1\n\n res_dic = cometml_report.report_losses_mean_and_std(res_dic,\n experiment,\n tr_step,\n val_step)\n\n for loss_name in res_dic:\n res_dic[loss_name]['acc'] = []\n pprint(res_dic)\n" ]
[ [ "torch.sum", "torch.stack", "torch.std", "torch.rand", "torch.no_grad", "torch.randperm", "torch.nn.DataParallel", "torch.mean" ] ]
niekh-13/Textmining_NPA-reports
[ "0453f2f12e7d0745ac59076a1d255f4de79fc85c" ]
[ "util/outlier.py" ]
[ "##############################################################\n# #\n# Niek Huijsmansen #\n# Textmining medical notes for cognition #\n# Outlier Z score #\n# #\n##############################################################\n\nimport scipy\nimport math\nimport numpy as np\n\n# Class for outlier detection algorithms based on some distribution of the data. They\n# all consider only single points per row (i.e. one column).\nclass OutlierDetection:\n\n # Finds outliers in the specified columns of datatable and removes outliers\n def chauvenet(self, data_table, col):\n # take only the column\n data = data_table[col]\n #remove nans\n data.dropna(inplace=True)\n # Taken partly from: https://www.astro.rug.nl/software/kapteyn/\n # Computer the mean and standard deviation.\n mean = data.mean()\n std = data.std()\n N = len(data.index)\n criterion = 1.0 / (2 * N)\n\n # Consider the deviation for the data points.\n deviation = abs(data - mean) / std\n\n # Express the upper and lower bounds.\n low = -deviation / math.sqrt(2)\n high = deviation / math.sqrt(2)\n mask = []\n\n # Pass all rows in the dataset.\n for i in data.index.tolist():\n # Determine the probability of observing the point\n prob = 1.0 - 0.5 * (scipy.special.erf(high.loc[i]) - scipy.special.erf(low.loc[i]))\n # And mark as an outlier when the probability is below our criterion.\n if prob < criterion:\n mask.append(i)\n else:\n continue\n print(data_table.loc[mask, col])\n data_table.loc[mask, col] = np.nan\n return data_table\n\n\n\n" ]
[ [ "scipy.special.erf" ] ]
knit-pk/AI-Section-2017
[ "a744b130defe58050264a37d88732af66ecabf40" ]
[ "SARSA/SARSA.py" ]
[ "'''\nExample implementation of SARSA algorithm for learning the path through frozen lake.\nThe is_slippery flag lets us change the rules of the game, if True the probability of\nchanging the chosen direction is 4 out of 10.\n'''\n\nimport gym\nimport numpy as np\nimport time\nimport pygame\n\n\nclass Game:\n stan = 0;\n\n def __init__(self, field):\n self.field = field\n\n def step(self, action):\n reward = -0.04\n done = False\n info = False\n\n if (action == 0) and ((self.stan % 4) != 0):\n self.stan -= 1\n if (action == 1) and (self.stan < 12):\n self.stan += 4\n if (action == 2) and ((self.stan % 4) != 3):\n self.stan += 1\n if (action == 3) and (self.stan > 3):\n self.stan -= 4\n\n if self.field[self.stan] == 'H':\n reward = -5\n done = True\n\n if self.field[self.stan] == 'G':\n reward = 1\n done = True\n\n return self.stan, reward, done, info;\n\n def reset(self):\n self.stan = 0\n return self.stan;\n\n\ndef drawGridWorld(Q, field, player, action):\n # Grid world init\n pygame.init()\n font = pygame.font.SysFont(\"monospace\", 30, True)\n surface = pygame.display.set_mode((860, 860)) # width x height\n pygame.display.set_caption('GridWorld')\n sleep_time = 0.02;\n\n surface.fill((0, 0, 0))\n wiersz = 0\n kolumna = 0\n offset = 10\n size = 200\n # print(action)\n for pole in range(len(Q)): # Y # pola pionowo\n if pole != 0 and (pole % len(Q[0]) == 0):\n wiersz += 1\n kolumna = 0\n x_cord = offset + offset * kolumna + kolumna * size\n y_cord = offset + offset * wiersz + wiersz * size\n # Field\n field_color = (189, 189, 189)\n if field[pole] == 'H':\n field_color = (33, 33, 33)\n if field[pole] == 'S':\n field_color = (255, 179, 0)\n if field[pole] == 'G':\n field_color = (118, 255, 3)\n pygame.draw.rect(surface, field_color, (x_cord, y_cord, size, size))\n # Player\n if pole == player:\n field_color = (3, 169, 244)\n pygame.draw.circle(surface, field_color, (\n int(round(x_cord + size / 2)), int(round(y_cord + size / 2))),\n int(round(size / 2)))\n if action == 0:\n move_action = font.render(\"<\", False, (255, 0, 0))\n if action == 1:\n move_action = font.render(\"\\/\", False, (255, 0, 0))\n if action == 2:\n move_action = font.render(\">\", False, (255, 0, 0))\n if action == 3:\n move_action = font.render(\"/\\\\\", False, (255, 0, 0))\n\n surface.blit(move_action, (0, 0))\n # QMatrix\n\n color = (255, 255, 255)\n\n best = Q[pole].argmax()\n for i in range(4):\n # print(best)\n if i == best:\n color = (255, 0, 0)\n x_label_cord = 0\n y_label_cord = 0\n if i == 0: # left\n x_label_cord = x_cord\n y_label_cord = y_cord\n direction = 'left'\n # color = (0, 0, 255) # blue\n\n if i == 1: # down\n x_label_cord = x_cord\n y_label_cord = y_cord + size / 4\n direction = 'down'\n # color = (0, 255, 0) # green\n\n if i == 2: # right\n x_label_cord = x_cord\n y_label_cord = y_cord + size / 4 * 2\n direction = 'right'\n # color = (0, 255, 255) # green blue\n\n if i == 3: # up\n x_label_cord = x_cord\n y_label_cord = y_cord + size / 2 + size / 4\n direction = 'up'\n # color = (255, 0, 0) # red\n\n label = font.render(\"{}:{}\".format(direction, round(Q[pole][i], 3)), False,\n color)\n surface.blit(label, (x_label_cord, y_label_cord))\n kolumna += 1\n pygame.display.update()\n time.sleep(sleep_time)\n\n\ndef learn(is_slippery):\n if is_slippery:\n env = gym.make('FrozenLake-v0')\n Q = np.zeros([env.observation_space.n, env.action_space.n])\n else:\n field = ['S', 'F', 'F', 'F',\n 'F', 'H', 'F', 'H',\n 'F', 'F', 'F', 'H',\n 'H', 'F', 'F', 'G'\n ]\n env = Game(field)\n Q = np.zeros([16, 4])\n\n 
a = .8 # alpha\n y = .95 # gamma\n num_episodes = 2000\n\n for i in range(num_episodes):\n\n current_state = env.reset()\n current_action = np.argmax(Q[current_state, :])\n for j in range(100):\n\n next_state, reward, done, _ = env.step(current_action)\n\n if is_slippery:\n next_action = np.argmax(\n Q[next_state, :] + np.random.randn(1, env.action_space.n) * (\n 1. / (i + 1)))\n else:\n next_action = np.argmax(Q[next_state, :] + np.random.randn(1, 4) * (\n 1. / (i + 1)))\n\n Q[current_state, current_action] += a * (\n reward + y * Q[next_state, next_action] - Q[\n current_state, current_action])\n\n current_state = next_state\n current_action = next_action\n\n if done == True:\n break\n\n return Q\n\n\ndef play(inQ, is_slippery):\n field = ['S', 'F', 'F', 'F',\n 'F', 'H', 'F', 'H',\n 'F', 'F', 'F', 'H',\n 'H', 'F', 'F', 'G'\n ]\n\n if is_slippery:\n env = gym.make('FrozenLake-v0')\n else:\n env = Game(field)\n\n num_episodes = 2000\n Q = inQ\n rList = [] # reward list\n\n for i in range(num_episodes):\n total_reward = 0\n\n state = env.reset()\n\n drawGridWorld(Q, field, state, 0)\n\n action = np.argmax(Q[state, :])\n for j in range(100):\n\n drawGridWorld(Q, field, state, action)\n\n state, reward, done, _ = env.step(action)\n\n action = np.argmax(Q[state, :])\n\n total_reward += reward\n\n if done == True:\n break\n rList.append(total_reward)\n\n print(\"Score over time: \" + str(sum(rList) / num_episodes))\n\n\nif __name__ == '__main__':\n is_slippery = False\n Q = learn(is_slippery)\n play(Q, is_slippery)\n" ]
[ [ "numpy.random.randn", "numpy.argmax", "numpy.zeros" ] ]
RCheese/gumpy
[ "c5d602122bef87827dae4abeace6c867c65eb1cb" ]
[ "gumpy/split.py" ]
[ "import sklearn.model_selection\nfrom sklearn.model_selection import (ShuffleSplit, StratifiedKFold,\n StratifiedShuffleSplit)\n\n\ndef normal(X, labels, test_size):\n \"\"\"Split a dataset into training and test parts.\n Args:\n X (numpy.ndarray): 2D features matrix \n labels: labels vector \n test_size: size of the split\n \n Returns:\n A 2D CSP features matrix \n \"\"\"\n Y = labels\n X_train, X_test, Y_train, Y_test = \\\n sklearn.model_selection.train_test_split(X, Y,\n test_size=test_size,\n random_state=0)\n return X_train, X_test, Y_train, Y_test\n\n\ndef time_series_split(features, labels, n_splits):\n \"\"\"Split a dataset into n splits.\n\n \"\"\"\n xx = sklearn.model_selection.TimeSeriesSplit(n_splits)\n for train_index, test_index in xx.split(features):\n X_train, X_test = features[train_index], features[test_index]\n y_train, y_test = labels[train_index], labels[test_index]\n\n return X_train, X_test, y_train, y_test\n\n\ndef stratified_KFold(features, labels, n_splits):\n \"\"\"Stratified K-Folds cross-validator\n Stratification is the process of rearranging the data as to ensure each fold is a good representative of the whole\n and by also keeping the balance of classes\n \"\"\"\n skf = StratifiedKFold(n_splits)\n skf.get_n_splits(features, labels)\n for train_index, test_index in skf.split(features, labels):\n X_train, X_test = features[train_index], features[test_index]\n Y_train, Y_test = labels[train_index], labels[test_index]\n return X_train, X_test, Y_train, Y_test\n\n\n# Stratified ShuffleSplit cross-validator\ndef stratified_shuffle_Split(features, labels, n_splits, test_size, random_state):\n \"\"\"Stratified ShuffleSplit cross-validator\n \"\"\"\n cv = StratifiedShuffleSplit(n_splits, test_size, random_state=random_state)\n for train_index, test_index in cv.split(features, labels):\n X_train = features[train_index]\n X_test = features[test_index]\n Y_train = labels[train_index]\n Y_test = labels[test_index]\n return X_train, X_test, Y_train, Y_test\n\n\n# Random permutation cross-validator\ndef shuffle_Split(features, labels, n_splits, test_size, random_state):\n \"\"\"ShuffleSplit: Random permutation cross-validator\n \"\"\"\n cv = ShuffleSplit(n_splits, test_size, random_state=random_state)\n for train_index, test_index in cv.split(features):\n X_train = features[train_index]\n X_test = features[test_index]\n Y_train = labels[train_index]\n Y_test = labels[test_index]\n return X_train, X_test, Y_train, Y_test\n" ]
[ [ "sklearn.model_selection.StratifiedShuffleSplit", "sklearn.model_selection.StratifiedKFold", "sklearn.model_selection.ShuffleSplit" ] ]
XiSHEN0220/SSR
[ "50f473b690f6c28e8c828c8ec65de7680400b011" ]
[ "transductive_few_shot/src/dataset.py" ]
[ "## settings of different datasets\nimport numpy as np\nimport torchvision.transforms as transforms\n\ndef dataset_setting(dataset, nSupport, nQuery=15):\n\n if 'miniImageNet' in dataset :\n mean = [x/255.0 for x in [120.39586422, 115.59361427, 104.54012653]]\n std = [x/255.0 for x in [70.68188272, 68.27635443, 72.54505529]]\n normalize = transforms.Normalize(mean=mean, std=std)\n trainTransform = transforms.Compose([transforms.RandomCrop(80, padding=8),\n transforms.RandomHorizontalFlip(),\n transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),\n lambda x: np.asarray(x),\n transforms.ToTensor(),\n normalize\n ])\n \n \n valTransform = transforms.Compose([transforms.CenterCrop(80),\n lambda x: np.asarray(x),\n transforms.ToTensor(),\n normalize])\n \n\n inputW, inputH, nbCls = 80, 80, 64\n\n trainDir = '../data/Mini-ImageNet/train/'\n valDir = '../data/Mini-ImageNet/val/'\n testDir = '../data/Mini-ImageNet/test/'\n episodeJson = '../data/Mini-ImageNet/val1000Episode_5_way_{:d}_shot_{:d}_query.json'.format(nSupport, nQuery)\n \n ## the preprocessing is the same as https://gitlab.mpi-klsb.mpg.de/yaoyaoliu/e3bm/-/blob/inductive/dataloader/tiered_imagenet.py \n elif 'tieredImageNet' in dataset :\n mean = [x / 255.0 for x in [125.3, 123.0, 113.9]]\n std = [x / 255.0 for x in [63.0, 62.1, 66.7]]\n normalize = transforms.Normalize(mean=mean, std=std)\n trainTransform = transforms.Compose([\n transforms.RandomResizedCrop(84),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize\n ])\n \n\n valTransform = transforms.Compose([ transforms.Resize([92, 92]),\n transforms.CenterCrop(84),\n transforms.ToTensor(),\n normalize])\n \n \n\n inputW, inputH, nbCls = 84, 84, 351\n\n trainDir = '../data/tiered_imagenet/train/'\n valDir = '../data/tiered_imagenet/val/'\n testDir = '../data/tiered_imagenet/test/'\n episodeJson = '../data/tiered_imagenet/val1000Episode_5_way_{:d}_shot_{:d}_query.json'.format(nSupport, nQuery)\n\n elif dataset == 'Cifar':\n mean = [x/255.0 for x in [129.37731888, 124.10583864, 112.47758569]]\n std = [x/255.0 for x in [68.20947949, 65.43124043, 70.45866994]]\n normalize = transforms.Normalize(mean=mean, std=std)\n trainTransform = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),\n lambda x: np.asarray(x),\n transforms.ToTensor(),\n normalize\n ])\n \n\n valTransform = transforms.Compose([lambda x: np.asarray(x),\n transforms.ToTensor(),\n normalize])\n \n \n inputW, inputH, nbCls = 32, 32, 64\n\n trainDir = '../data/cifar-fs/train/'\n valDir = '../data/cifar-fs/val/'\n testDir = '../data/cifar-fs/test/'\n episodeJson = '../data/cifar-fs/val1000Episode_5_way_{:d}_shot_{:d}_query.json'.format(nSupport, nQuery)\n\n else:\n raise ValueError('Do not support other datasets yet.')\n\n return trainTransform, valTransform, inputW, inputH, trainDir, valDir, testDir, episodeJson, nbCls\n" ]
[ [ "numpy.asarray" ] ]
jlee-ds/meshcnn
[ "6a3c9efa18f00786e2c71f56934d101a1895e9c2" ]
[ "data/autoencoder_data.py" ]
[ "import os\nimport torch\nfrom data.base_dataset import BaseDataset\nfrom util.util import is_mesh_file, pad, pad_vertices\nimport numpy as np\nfrom models.layers.mesh import Mesh\n\nclass AutoEncoderData(BaseDataset):\n\n def __init__(self, opt):\n BaseDataset.__init__(self, opt)\n self.opt = opt\n self.device = torch.device('cuda:{}'.format(opt.gpu_ids[0])) if opt.gpu_ids else torch.device('cpu')\n self.root = opt.dataroot\n self.dir = os.path.join(opt.dataroot)\n self.classes, self.class_to_idx = self.find_classes(self.dir)\n print(self.classes, self.class_to_idx)\n self.paths = self.make_dataset_by_class(self.dir, self.class_to_idx, opt.phase)\n self.nclasses = len(self.classes)\n self.size = len(self.paths)\n self.get_mean_std()\n # # modify for network later.\n opt.nclasses = self.nclasses\n opt.input_nc = self.ninput_channels\n\n def __getitem__(self, index):\n path = self.paths[index][0]\n #print(path)\n mesh = Mesh(file=path, opt=self.opt, hold_history=True, export_folder=self.opt.export_folder)\n meta = {}\n mesh.vs = (mesh.vs - np.mean(mesh.vs, 0)) / np.std(mesh.vs, 0)\n meta['mesh'] = mesh\n meta['export_folder'] = mesh.export_folder\n meta['filename'] = mesh.filename\n # get edge features\n edge_features = mesh.extract_features()\n edge_features = pad(edge_features, self.opt.ninput_edges)\n vs, pad_iter = pad_vertices(mesh.vs, 1402)\n meta['edge_features'] = (edge_features - self.mean) / self.std\n meta['label'] = vs.astype(np.float)\n meta['init_faces'] = mesh.init_faces\n meta['pad_iter'] = pad_iter\n return meta\n\n def __len__(self):\n return self.size\n\n @staticmethod\n def make_dataset(path):\n meshes = []\n assert os.path.isdir(path), '%s is not a valid directory' % path\n\n for root, _, fnames in sorted(os.walk(path)):\n for fname in fnames:\n if is_mesh_file(fname):\n path = os.path.join(root, fname)\n meshes.append(path)\n\n return meshes\n \n @staticmethod \n def find_classes(dir):\n classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]\n classes.sort()\n if '.ipynb_checkpoints' in classes:\n classes.remove('.ipynb_checkpoints')\n class_to_idx = {classes[i]: i for i in range(len(classes))}\n return classes, class_to_idx\n\n @staticmethod\n def make_dataset_by_class(dir, class_to_idx, phase):\n meshes = []\n dir = os.path.expanduser(dir)\n for target in sorted(os.listdir(dir)):\n d = os.path.join(dir, target)\n if not os.path.isdir(d):\n continue\n for root, _, fnames in sorted(os.walk(d)):\n ## LEE\n #file_num = 0\n for fname in sorted(fnames):\n if is_mesh_file(fname) and (root.count(phase)==1):\n path = os.path.join(root, fname)\n item = (path, class_to_idx[target])\n meshes.append(item)\n \n ## LEE\n #file_num += 1\n #if file_num == 100 :\n # break\n return meshes" ]
[ [ "numpy.std", "torch.device", "numpy.mean" ] ]
hegland/cmepy
[ "fa8cdf2fad779badbcb629bf6ee33316724ec4a4" ]
[ "cmepy/lexarrayset.py" ]
[ "\"\"\"\nlexical array set operations\n\nthese operations are based upon the one dimensional array set operations\nfrom numpy.lib.arraysetops, but generalised to work for sets of m-tuples,\nwhere each element is stored as a row of a 2d m by n array, using numpy's\n'lexsort' lexical sorting function.\n\"\"\"\n\nimport numpy\n\ndef unique(las, return_inverse=False):\n \"\"\"\n returns a sorted vector of unique states\n \n if the optional flag return_inverse is set to True,\n additionally returns an index vector used to\n inverse the unique operation and recover the\n original vector\n \"\"\"\n \n # argsort the array via lexical sorting using the keys\n # las[0, :] to las[-1, :], in increasing priority\n order = numpy.lexsort(las)\n if numpy.size(order) == 0:\n return las\n slas = las[:, order]\n # then figure out the indices of the first instance of each row\n not_equal_adj = numpy.logical_or.reduce(slas[:, :-1] != slas[:, 1:])\n not_equal_adj = numpy.concatenate(([True], not_equal_adj))\n \n uslas = slas[:, not_equal_adj]\n if return_inverse:\n order_inverse = order.argsort(kind='mergesort')\n # compute the unique inverse indices by summing over the\n # boolean array. because we want to compute 0-based indices\n # it is necessary to set the first boolean to False.\n # (alternatively, we could have subtracted 1 from the entire\n # result after summing)\n not_equal_adj[0] = False\n unique_inverse = numpy.add.accumulate(not_equal_adj)[order_inverse]\n return uslas, unique_inverse\n else:\n return uslas\n\ndef nonunique_member(arr1, las2):\n \"\"\"\n vectorised set membership operation for lexical array arr1 and\n lexical array set las2\n \n in general, the rows of array arr1 can be non-unique\n \n returns a boolean array 'mask' such that\n arr1[:, mask] is the subset of rows of arr1 that are also\n rows of las2\n \"\"\"\n las1, unique_inverse = unique(arr1, return_inverse=True)\n return member(las1, las2)[unique_inverse]\n\ndef member(las1, las2):\n \"\"\"\n vectorised set membership operation for lexical array sets las1, las2\n \n returns a boolean array 'mask' such that\n \n las1[:, mask] is the subset of rows of las1 that\n are also rows of las2\n \"\"\"\n \n las = numpy.hstack((las1, las2))\n \n las1_n = numpy.shape(las1)[1]\n \n # since the 'in' operation is non-commutative we must\n # use a stable sort algorithm. 
this ensures that\n # if slas[i] == slas[i+1], then slas[i] is the element\n # from las1, while slas[i+1] is the element from las2\n \n # by grepping through the numpy source it seems that lexsort\n # should be a stable sort (is this officially documented anywhere?)\n \n order = numpy.lexsort(las)\n \n if numpy.size(order) == 0:\n return numpy.zeros((las1_n, ), dtype=bool)\n slas = las[:, order]\n equal_adj = numpy.logical_and.reduce(slas[:, :-1] == slas[:, 1:])\n mask = numpy.concatenate((equal_adj, [False]))\n \n inverse_order = order.argsort(kind='mergesort')[:las1_n] \n return mask[inverse_order]\n\ndef split(las1, las2):\n \"\"\"\n returns (las1 intersect las2, las1 difference las2)\n \"\"\"\n if numpy.size(las1) == 0:\n # if las1 is empty, return a couple of copies of las1\n return (numpy.array(las1), numpy.array(las1))\n mask = member(las1, las2)\n return (numpy.array(las1[:, mask]),\n numpy.array(las1[:, numpy.logical_not(mask)]))\n\ndef difference(las1, las2):\n \"\"\"\n returns las1 difference las2\n \"\"\"\n if numpy.size(las1) == 0:\n # if las1 is empty, return a copy of las1\n return numpy.array(las1)\n return numpy.array(las1[:, numpy.logical_not(member(las1, las2))])\n\ndef intersection(las1, las2):\n \"\"\"\n intersection of las1 with las2\n \"\"\"\n las = numpy.hstack((las1, las2))\n order = numpy.lexsort(las)\n if numpy.size(order) == 0:\n return las\n slas = las[:, order]\n equal_adj = numpy.logical_and.reduce(slas[:, :-1] == slas[:, 1:])\n return slas[:, :-1][:, equal_adj]\n\ndef union(las1, las2):\n \"\"\"\n union of las1 with las2\n \"\"\"\n return unique(numpy.hstack((las1, las2)))\n\ndef shift(las, offset):\n \"\"\"\n shifts all states in las by offset\n \"\"\"\n offset = numpy.asarray(offset)[:, numpy.newaxis]\n return las + offset\n\ndef empty(dim):\n \"\"\"\n returns an empty LexArraySet of dimension dim.\n \"\"\"\n empty_data = numpy.zeros((dim, 0), dtype=int)\n return LexArraySet(empty_data)\n\ndef create(data, unique_data=False):\n \"\"\"\n returns a new LexArraySet for the given data\n \"\"\"\n return LexArraySet(data, unique_data)\n\nclass LexArraySet(object):\n \"\"\"\n LexArraySet is an implementation of a set as a 2d array, where\n the members of the set are the rows of the array. 
The rows\n are ordered using lexical ordering.\n \"\"\"\n \n def __init__(self, data, unique_data=False):\n \"\"\"\n data can either be another LexArraySet instance, in which\n case a copy is made of that instance's data, or a\n two-dimensional numpy array, where each row is interpreted\n as a tuple belonging to the set.\n \n If data is a two-dimensional numpy array, then the optional\n unique_data flag may be set to True to indicate that the\n rows of data are already unique.\n \"\"\"\n if type(data) is LexArraySet:\n self.data = numpy.array(data.data)\n else:\n data = numpy.asarray(data)\n if not unique_data:\n self.data = unique(data)\n else:\n self.data = data\n \n @property\n def size(self):\n \"\"\"\n number of elements in set (equal to number of rows of the lexical array)\n \"\"\"\n shape = numpy.shape(self.data)\n if len(shape) < 2:\n return 0\n else:\n return shape[1]\n \n def member(self, rhs):\n \"\"\"\n las1.member(las2) -> mask; mask[i] True iff row i of las1 is in las2\n \"\"\"\n return member(self.data, rhs.data)\n \n def split(self, rhs):\n \"\"\"\n las1.split(las2) -> (las1.intersect(las2), las1.difference(las2))\n \"\"\"\n intersect, diff = split(self.data, rhs.data)\n las_intersect = LexArraySet(intersect, unique_data=True)\n las_diff = LexArraySet(diff, unique_data=True)\n return las_intersect, las_diff\n \n def difference(self, rhs):\n \"\"\"\n las1.difference(las2) -> diff; diff's rows are those of las1 not in las2\n \"\"\"\n return LexArraySet(difference(self.data, rhs.data), unique_data=True)\n \n def intersection(self, rhs):\n \"\"\"\n las1.intersection(las2) -> isect; isect's rows common to las1, las2\n \"\"\"\n return LexArraySet(intersection(self.data, rhs.data), unique_data=True)\n \n def union(self, rhs):\n \"\"\"\n las1.union(las2) -> u; u's rows are union of rows in las1, las2\n \"\"\"\n return LexArraySet(union(self.data, rhs.data), unique_data=True)\n \n def shift(self, offset):\n \"\"\"\n las.shift(offset) -> slas; where rows of slas are rows of las + offset\n \n offset must be of compatible shape to the rows of las.\n \"\"\"\n return LexArraySet(shift(self.data, offset), unique_data=True)\n \n def difference_update(self, rhs):\n \"\"\"\n in place difference\n \"\"\"\n self.data = difference(self.data, rhs.data)\n \n def intersection_update(self, rhs):\n \"\"\"\n in place intersection\n \"\"\"\n self.data = intersection(self.data, rhs.data)\n \n def union_update(self, rhs):\n \"\"\"\n in place union\n \"\"\"\n self.data = union(self.data, rhs.data)\n \n def shift_update(self, shift):\n \"\"\"\n in place shift\n \"\"\"\n shift = numpy.asarray(shift)[:, numpy.newaxis]\n self.data += shift\n" ]
[ [ "numpy.logical_and.reduce", "numpy.zeros", "numpy.add.accumulate", "numpy.asarray", "numpy.size", "numpy.lexsort", "numpy.logical_not", "numpy.hstack", "numpy.shape", "numpy.logical_or.reduce", "numpy.concatenate", "numpy.array" ] ]
remorsecs/Kaggle-plant-seedlings-classification
[ "2fb837fb09ad07c3950684a8179052aa14a745e9" ]
[ "libs/model.py" ]
[ "import torch.nn as nn\n\n\nclass VGG11(nn.Module):\n def __init__(self):\n super().__init__()\n conv_layers = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=3, padding=1),\n nn.ReLU(True),\n nn.MaxPool2d((2, 2), 2),\n\n nn.Conv2d(64, 128, kernel_size=3, padding=1),\n nn.ReLU(True),\n nn.MaxPool2d((2, 2), 2),\n\n nn.Conv2d(128, 256, kernel_size=3, padding=1),\n nn.ReLU(True),\n nn.Conv2d(256, 256, kernel_size=3, padding=1),\n nn.ReLU(True),\n nn.MaxPool2d((2, 2), 2),\n\n nn.Conv2d(256, 512, kernel_size=3, padding=1),\n nn.ReLU(True),\n nn.Conv2d(512, 512, kernel_size=3, padding=1),\n nn.ReLU(True),\n nn.MaxPool2d((2, 2), 2),\n\n nn.Conv2d(512, 512, kernel_size=3, padding=1),\n nn.ReLU(True),\n nn.Conv2d(512, 512, kernel_size=3, padding=1),\n nn.ReLU(True),\n nn.MaxPool2d((2, 2), 2),\n )\n fc_layers = nn.Sequential(\n nn.Linear(512 * 7 * 7, 4096),\n nn.ReLU(True),\n nn.Dropout(0.5),\n nn.Linear(4096, 4096),\n nn.ReLU(True),\n nn.Dropout(0.5),\n )\n self.feature = nn.Sequential(\n conv_layers,\n nn.Flatten(),\n fc_layers,\n )\n self.classifier = nn.Linear(4096, 12)\n\n def forward(self, x):\n feature = self.feature(x)\n score = self.classifier(feature)\n return score\n" ]
[ [ "torch.nn.MaxPool2d", "torch.nn.Linear", "torch.nn.Flatten", "torch.nn.Conv2d", "torch.nn.ReLU", "torch.nn.Dropout" ] ]
VinAIResearch/PointSWD
[ "ea676926e21286185e836ab355fee7937540ce69" ]
[ "criteria_comparing_sets_pcs/all_metrics_calculator.py" ]
[ "import os.path as osp\nimport sys\n\nimport torch\nimport torch.nn as nn\n\n\nsys.path.append(osp.dirname(osp.dirname(osp.abspath(__file__))))\nfrom criteria_comparing_sets_pcs.jsd_calculator import JsdCalculator\nfrom metrics_from_point_flow.evaluation_metrics import compute_all_metrics\n\n\nclass AllMetricsCalculator(nn.Module):\n def __init__(self):\n super(AllMetricsCalculator, self).__init__()\n\n @staticmethod\n def forward(sample_pcs, ref_pcs, batch_size, **kwargs):\n results = {}\n results.update(compute_all_metrics(sample_pcs, ref_pcs, batch_size, **kwargs))\n for key, value in results.items():\n if torch.is_tensor(value):\n results[key] = value.item()\n if \"save_file\" in kwargs.keys():\n log = \"{}: {}\\n\"\n with open(kwargs[\"save_file\"], \"a\") as fp:\n for key, value in results.items():\n fp.write(log.format(key, value))\n # end for\n # end with\n # end if\n print(\"\\n\")\n log = \"{}: {}\\n\"\n for key, value in results.items():\n print(log.format(key, value))\n # end for\n jsd = JsdCalculator.forward(sample_pcs, ref_pcs, **kwargs)\n return jsd\n\n\nif __name__ == \"__main__\":\n sample_pcs = torch.empty(10, 2048, 3).uniform_(0, 1).cuda()\n ref_pcs = torch.empty(10, 2048, 3).uniform_(0, 1).cuda()\n batch_size = 10\n print(AllMetricsCalculator.forward(sample_pcs, ref_pcs, batch_size))\n" ]
[ [ "torch.empty", "torch.is_tensor" ] ]
nden/photutils
[ "87879b2464ccfcd160f6a0c53ea4c0869a6e1cc2" ]
[ "photutils/detection/tests/test_findstars.py" ]
[ "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport os.path as op\nimport itertools\nimport warnings\n\nimport numpy as np\nfrom numpy.testing import assert_allclose\nimport pytest\n\nfrom astropy.table import Table\nfrom astropy.utils.exceptions import AstropyUserWarning\n\nfrom ..findstars import DAOStarFinder, IRAFStarFinder\nfrom ...datasets import make_100gaussians_image\n\ntry:\n import scipy # noqa\n HAS_SCIPY = True\nexcept ImportError:\n HAS_SCIPY = False\n\n\nDATA = make_100gaussians_image()\nTHRESHOLDS = [8.0, 10.0]\nFWHMS = [1.0, 1.5, 2.0]\nwarnings.simplefilter('always', AstropyUserWarning)\n\n\[email protected]('not HAS_SCIPY')\nclass TestDAOStarFinder:\n @pytest.mark.parametrize(('threshold', 'fwhm'),\n list(itertools.product(THRESHOLDS, FWHMS)))\n def test_daofind(self, threshold, fwhm):\n starfinder = DAOStarFinder(threshold, fwhm, sigma_radius=1.5)\n t = starfinder(DATA)\n datafn = ('daofind_test_thresh{0:04.1f}_fwhm{1:04.1f}'\n '.txt'.format(threshold, fwhm))\n datafn = op.join(op.dirname(op.abspath(__file__)), 'data', datafn)\n t_ref = Table.read(datafn, format='ascii')\n\n assert t.colnames == t_ref.colnames\n for col in t.colnames:\n assert_allclose(t[col], t_ref[col])\n\n def test_daofind_threshold_fwhm_inputs(self):\n with pytest.raises(TypeError):\n DAOStarFinder(threshold=np.ones((2, 2)), fwhm=3.)\n\n with pytest.raises(TypeError):\n DAOStarFinder(threshold=3., fwhm=np.ones((2, 2)))\n\n def test_daofind_include_border(self):\n starfinder = DAOStarFinder(threshold=10, fwhm=2, sigma_radius=1.5,\n exclude_border=False)\n t = starfinder(DATA)\n assert len(t) == 20\n\n def test_daofind_exclude_border(self):\n starfinder = DAOStarFinder(threshold=10, fwhm=2, sigma_radius=1.5,\n exclude_border=True)\n t = starfinder(DATA)\n assert len(t) == 19\n\n def test_daofind_nosources(self):\n data = np.ones((3, 3))\n starfinder = DAOStarFinder(threshold=10, fwhm=1)\n t = starfinder(data)\n assert len(t) == 0\n\n def test_daofind_sharpness(self):\n \"\"\"Sources found, but none pass the sharpness criteria.\"\"\"\n starfinder = DAOStarFinder(threshold=50, fwhm=1.0, sharplo=1.)\n t = starfinder(DATA)\n assert len(t) == 0\n\n def test_daofind_roundness(self):\n \"\"\"Sources found, but none pass the roundness criteria.\"\"\"\n starfinder = DAOStarFinder(threshold=50, fwhm=1.0, roundlo=1.)\n t = starfinder(DATA)\n assert len(t) == 0\n\n def test_daofind_flux_negative(self):\n \"\"\"Test handling of negative flux (here created by large sky).\"\"\"\n data = np.ones((5, 5))\n data[2, 2] = 10.\n starfinder = DAOStarFinder(threshold=0.1, fwhm=1.0, sky=10)\n t = starfinder(data)\n assert not np.isfinite(t['mag'])\n\n def test_daofind_negative_fit_peak(self):\n \"\"\"\n Regression test that sources with negative fit peaks (i.e.\n hx/hy<=0) are excluded.\n \"\"\"\n\n starfinder = DAOStarFinder(threshold=7., fwhm=1.5, roundlo=-np.inf,\n roundhi=np.inf, sharplo=-np.inf,\n sharphi=np.inf)\n t = starfinder(DATA)\n assert len(t) == 102\n\n def test_daofind_peakmax_filtering(self):\n \"\"\"\n Regression test that objects with ``peak`` >= ``peakmax`` are\n filtered out.\n \"\"\"\n\n peakmax = 20\n starfinder = DAOStarFinder(threshold=7., fwhm=1.5, roundlo=-np.inf,\n roundhi=np.inf, sharplo=-np.inf,\n sharphi=np.inf, peakmax=peakmax)\n t = starfinder(DATA)\n assert len(t) == 37\n assert all(t['peak'] < peakmax)\n\n def test_daofind_brightest_filtering(self):\n \"\"\"\n Regression test that only top ``brightest`` objects are\n selected.\n \"\"\"\n\n brightest = 40\n peakmax = 
20\n starfinder = DAOStarFinder(threshold=7., fwhm=1.5, roundlo=-np.inf,\n roundhi=np.inf, sharplo=-np.inf,\n sharphi=np.inf, brightest=brightest)\n t = starfinder(DATA)\n # combined with peakmax\n assert len(t) == brightest\n starfinder = DAOStarFinder(threshold=7., fwhm=1.5, roundlo=-np.inf,\n roundhi=np.inf, sharplo=-np.inf,\n sharphi=np.inf, brightest=brightest,\n peakmax=peakmax)\n t = starfinder(DATA)\n assert len(t) == 37\n\n def test_daofind_mask(self):\n \"\"\"Test DAOStarFinder with a mask.\"\"\"\n\n starfinder = DAOStarFinder(threshold=10, fwhm=1.5)\n mask = np.zeros_like(DATA, dtype=bool)\n mask[100:200] = True\n tbl1 = starfinder(DATA)\n tbl2 = starfinder(DATA, mask=mask)\n assert len(tbl1) > len(tbl2)\n\n\[email protected]('not HAS_SCIPY')\nclass TestIRAFStarFinder:\n @pytest.mark.parametrize(('threshold', 'fwhm'),\n list(itertools.product(THRESHOLDS, FWHMS)))\n def test_irafstarfind(self, threshold, fwhm):\n starfinder = IRAFStarFinder(threshold, fwhm, sigma_radius=1.5)\n t = starfinder(DATA)\n datafn = ('irafstarfind_test_thresh{0:04.1f}_fwhm{1:04.1f}'\n '.txt'.format(threshold, fwhm))\n datafn = op.join(op.dirname(op.abspath(__file__)), 'data', datafn)\n t_ref = Table.read(datafn, format='ascii')\n\n assert t.colnames == t_ref.colnames\n for col in t.colnames:\n assert_allclose(t[col], t_ref[col])\n\n def test_irafstarfind_threshold_fwhm_inputs(self):\n with pytest.raises(TypeError):\n IRAFStarFinder(threshold=np.ones((2, 2)), fwhm=3.)\n\n with pytest.raises(TypeError):\n IRAFStarFinder(threshold=3., fwhm=np.ones((2, 2)))\n\n def test_irafstarfind_nosources(self):\n data = np.ones((3, 3))\n starfinder = IRAFStarFinder(threshold=10, fwhm=1)\n t = starfinder(data)\n assert len(t) == 0\n\n def test_irafstarfind_sharpness(self):\n \"\"\"Sources found, but none pass the sharpness criteria.\"\"\"\n starfinder = IRAFStarFinder(threshold=50, fwhm=1.0, sharplo=2.)\n t = starfinder(DATA)\n assert len(t) == 0\n\n def test_irafstarfind_roundness(self):\n \"\"\"Sources found, but none pass the roundness criteria.\"\"\"\n starfinder = IRAFStarFinder(threshold=50, fwhm=1.0, roundlo=1.)\n t = starfinder(DATA)\n assert len(t) == 0\n\n def test_irafstarfind_sky(self):\n starfinder = IRAFStarFinder(threshold=25.0, fwhm=2.0, sky=10.)\n t = starfinder(DATA)\n assert len(t) == 4\n\n def test_irafstarfind_largesky(self):\n starfinder = IRAFStarFinder(threshold=25.0, fwhm=2.0, sky=100.)\n t = starfinder(DATA)\n assert len(t) == 0\n\n def test_irafstarfind_peakmax_filtering(self):\n \"\"\"\n Regression test that objects with ``peak`` >= ``peakmax`` are\n filtered out.\n \"\"\"\n peakmax = 20\n starfinder = IRAFStarFinder(threshold=7., fwhm=2, roundlo=-np.inf,\n roundhi=np.inf, sharplo=-np.inf,\n sharphi=np.inf, peakmax=peakmax)\n t = starfinder(DATA)\n assert len(t) == 117\n assert all(t['peak'] < peakmax)\n\n def test_irafstarfind_brightest_filtering(self):\n \"\"\"\n Regression test that only top ``brightest`` objects are selected.\n \"\"\"\n brightest = 40\n starfinder = IRAFStarFinder(threshold=7., fwhm=2, roundlo=-np.inf,\n roundhi=np.inf, sharplo=-np.inf,\n sharphi=np.inf, brightest=brightest)\n t = starfinder(DATA)\n assert len(t) == brightest\n\n def test_irafstarfind_mask(self):\n \"\"\"Test IRAFStarFinder with a mask.\"\"\"\n\n starfinder = IRAFStarFinder(threshold=10, fwhm=1.5)\n mask = np.zeros_like(DATA, dtype=bool)\n mask[100:200] = True\n tbl1 = starfinder(DATA)\n tbl2 = starfinder(DATA, mask=mask)\n assert len(tbl1) > len(tbl2)\n" ]
[ [ "numpy.ones", "numpy.zeros_like", "numpy.isfinite", "numpy.testing.assert_allclose" ] ]
kjdavidson/NoisePy
[ "a7445dd2f68f64cb562d6a87096e5f12a2c3b612" ]
[ "src/application_modules/measure_dvv.py" ]
[ "import sys\nimport time\nimport obspy\nimport pyasdf\nimport os, glob\nimport datetime\nimport numpy as np\nimport pandas as pd\nfrom mpi4py import MPI\nimport matplotlib.pyplot as plt\nfrom obspy.signal.filter import bandpass\n\nsys.path.insert(1,'../')\nimport noise_module\n\n# register datetime converter\nfrom pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters()\n\n'''\nthis application script of NoisePy is to perform dv/v analysis on the resulted cross-correlation\nfunctions from S2. Note that, to use this script, the `keep_substack` parameter in S2 has to be turned\n`True` when running S2. So the sub-stacked waveforms can be saved and further to be compared with the \nall-stacked waveforms to measure dv/v.\n\nAuthors: Chengxin Jiang ([email protected])\n Marine Denolle ([email protected])\n\nNOTE:\n 0) this script is only showing an example of how dv/v can be measured on the resulted file from S2, and \n the users need to expand/modify this script in order to apply for regional studies;\n 1) See Yuan et al., (2019) for more details on the comparison of different methods for mesuring dv/v as\n well as the numerical validation. \n'''\n\n############################################\n############ PAMAETER SECTION ##############\n############################################\n\n# input data and targeted component\nrootpath = '/Users/chengxin/Documents/NoisePy_example/SCAL/' # root path for this data processing\nsfile = os.path.join(rootpath,'STACK_month/CI.BLC/CI.BLC_CI.MPI.h5') # ASDF file containing stacked data\noutdir = os.path.join(rootpath,'figures/monitoring') # dir where to output dispersive image and extracted dispersion\nif not os.path.isdir(outdir):\n os.mkdir(outdir)\n\n# targeted component\nstack_method = 'linear' # which stacked data to measure dispersion info\nccomp = 'ZZ' # cross component\n\n# pre-defined group velocity to window direct and code waves\nvmin = 0.8 # minimum velocity of the direct waves -> start of the coda window\nlwin = 150 # window length in sec for the coda waves\n\n# basic parameters \nfreq = [0.1,0.2,0.3,0.5] # targeted frequency band for waveform monitoring\nnfreq = len(freq)-1\nonelag = False # make measurement one one lag or two \nnorm_flag = True # whether to normalize the cross-correlation waveforms\ndo_stretch = True # use strecthing method or not\ndo_dtw = False # use dynamic time warping method or not\ndo_mwcs = True # use moving-window cross spectrum method or not\ndo_mwcc = False # use moving-window cross correlation method or not\ndo_wts = True # use wavelet streching method or not\ndo_wxs = True # use wavelet cross spectrum method or not\n\n# parameters for stretching method\nepsilon = 2/100 # limit for dv/v (in decimal)\nnbtrial = 50 # number of increment of dt [-epsilon,epsilon] for the streching\n\n# parameters for DTW\nmlag = 50 # maxmum points to move (times dt gives the maximum time shifts)\nb = 5 # strain limit (to be tested)\ndirect = 1 # direction to accumulate errors (1=forward, -1=backward)\n\n# parameters for MWCS & MWCC\nmove_win_sec = 1.2*int(1/np.min(freq)) # moving window length (in sec)\nstep_sec = 0.3*move_win_sec # step for moving window sliding (in sec)\n\n# parameters for wavelet domain methods\ndj =1/12 # Spacing between discrete scales. Default value is 1/12.\ns0 =-1 # Smallest scale of the wavelet. 
Default value is 2*dt.\nJ =-1 # Number of scales less one.\nwvn='morlet' # wavelet class\n\n##############################################\n############ LOAD WAVEFORM DATA ##############\n##############################################\n\n# load stacked and sub-stacked waveforms \nwith pyasdf.ASDFDataSet(sfile,mode='r') as ds:\n dtype = 'Allstack0'+stack_method\n substacks = ds.auxiliary_data.list()\n nwin = len(substacks)-2\n try:\n dt = ds.auxiliary_data[dtype][ccomp].parameters['dt']\n dist = ds.auxiliary_data[dtype][ccomp].parameters['dist']\n maxlag = ds.auxiliary_data[dtype][ccomp].parameters['maxlag']\n tdata = ds.auxiliary_data[dtype][ccomp].data[:]\n except Exception:\n raise ValueError('cannot open %s to read'%sfile)\n\n# make coda window based on vmin\ntwin = [int(dist/vmin),int(dist/vmin)+lwin]\nif twin[1] > maxlag:\n raise ValueError('proposed window exceeds limit! reduce %d'%lwin)\n\n# ref and tvec\nref = tdata\ntvec_all = np.arange(-maxlag,maxlag+dt,dt)\n# add 20 s to the coda window for plotting purposes\ndisp_indx = np.where(np.abs(tvec_all)<=np.max(twin)+20)[0]\n# causal and acausal coda window \npwin_indx = np.where((tvec_all>=np.min(twin))&(tvec_all<np.max(twin)))[0]\nnwin_indx = np.where((tvec_all<=-np.min(twin))&(tvec_all>=-np.max(twin)))[0]\ntvec_disp = tvec_all[disp_indx]\n# npts for the win and raw\nnpts_all = len(tvec_all)\nnpts_win = len(pwin_indx)\n\n# save parameters as a dictionary\npara = {'twin':twin,'freq':freq,'dt':dt,'ccomp':ccomp,'onelag':onelag,'norm_flag':norm_flag,'npts_all':npts_all,'npts_win':npts_win}\n\n# allocate matrix for cur and ref waveforms and corr coefficient\ncur = np.zeros(shape=(nwin,npts_all),dtype=np.float32)\ntcur = np.zeros(shape=(nwin,npts_all),dtype=np.float32)\npcor_cc = np.zeros(shape=(nwin),dtype=np.float32)\nncor_cc = np.zeros(shape=(nwin),dtype=np.float32)\ntimestamp = np.empty(nwin,dtype='datetime64[s]')\n\n# tick inc for plotting \nif nwin>100:\n tick_inc = int(nwin/10)\nelif nwin>10:\n tick_inc = int(nwin/5) \nelse:\n tick_inc = 2\n\n# load all current waveforms and get corr-coeff\nwith pyasdf.ASDFDataSet(sfile,mode='r') as ds:\n\n # loop through each freq band\n for ifreq in range(nfreq):\n \n # freq parameters\n freq1 = freq[ifreq]\n freq2 = freq[ifreq+1]\n para['freq'] = [freq1,freq2]\n move_win_sec = 1.2*int(1/freq1)\n\n # reference waveform\n tref = bandpass(ref,freq1,freq2,int(1/dt),corners=4,zerophase=True)\n if norm_flag:\n tref = tref/np.max(np.abs(tref))\n\n # loop through each cur waveform and do filtering\n igood = 0\n for ii in range(nwin):\n try:\n cur[igood] = ds.auxiliary_data[substacks[ii+2]][ccomp].data[:]\n except Exception:\n continue\n timestamp[igood] = obspy.UTCDateTime(float(substacks[ii+2][1:]))\n tcur[igood] = bandpass(cur[igood],freq1,freq2,int(1/dt),corners=4,zerophase=True)\n if norm_flag:\n tcur[igood] /= np.max(np.abs(tcur[igood]))\n \n # get cc coefficient\n pcor_cc[igood] = np.corrcoef(tref[pwin_indx],tcur[igood,pwin_indx])[0,1]\n ncor_cc[igood] = np.corrcoef(tref[nwin_indx],tcur[igood,nwin_indx])[0,1]\n igood += 1 \n nwin = igood\n\n ############ PLOT WAVEFORM DATA AND CC ##############\n # plot the raw waveform and the correlation coefficient\n plt.figure(figsize=(11,12))\n ax0= plt.subplot(311)\n # 2D waveform matrix\n ax0.matshow(tcur[:igood,disp_indx],cmap='seismic',extent=[tvec_disp[0],tvec_disp[-1],nwin,0],aspect='auto')\n ax0.plot([0,0],[0,nwin],'k--',linewidth=2)\n ax0.set_title('%s, dist:%5.2fkm, filter @%4.2f-%4.2fHz' % (sfile.split('/')[-1],dist,freq1,freq2))\n 
ax0.set_xlabel('time [s]')\n ax0.set_ylabel('waveforms')\n ax0.set_yticks(np.arange(0,nwin,step=tick_inc))\n # shade the coda part\n ax0.fill(np.concatenate((tvec_all[nwin_indx],np.flip(tvec_all[nwin_indx],axis=0)),axis=0), \\\n np.concatenate((np.ones(len(nwin_indx))*0,np.ones(len(nwin_indx))*nwin),axis=0),'c', alpha=0.3,linewidth=1)\n ax0.fill(np.concatenate((tvec_all[pwin_indx],np.flip(tvec_all[pwin_indx],axis=0)),axis=0), \\\n np.concatenate((np.ones(len(nwin_indx))*0,np.ones(len(nwin_indx))*nwin),axis=0),'y', alpha=0.3)\n ax0.xaxis.set_ticks_position('bottom')\n # reference waveform\n ax1 = plt.subplot(613)\n ax1.plot(tvec_disp,tref[disp_indx],'k-',linewidth=1)\n ax1.autoscale(enable=True, axis='x', tight=True)\n ax1.grid(True)\n ax1.legend(['reference'],loc='upper right')\n # the cross-correlation coefficient\n ax2 = plt.subplot(614)\n ax2.plot(timestamp[:igood],pcor_cc[:igood],'yo-',markersize=2,linewidth=1)\n ax2.plot(timestamp[:igood],ncor_cc[:igood],'co-',markersize=2,linewidth=1)\n ax2.set_xticks(timestamp[0:nwin:tick_inc])\n ax2.set_ylabel('cc coeff')\n ax2.legend(['positive','negative'],loc='upper right')\n\n ###############################################\n ############ MONITORING PROCESSES #############\n ###############################################\n \n # allocate matrix for dvv and its unc\n dvv_stretch = np.zeros(shape=(nwin,4),dtype=np.float32)\n dvv_dtw = np.zeros(shape=(nwin,4),dtype=np.float32)\n dvv_mwcs = np.zeros(shape=(nwin,4),dtype=np.float32)\n dvv_wcc = np.zeros(shape=(nwin,4),dtype=np.float32)\n dvv_wts = np.zeros(shape=(nwin,4),dtype=np.float32)\n dvv_wxs = np.zeros(shape=(nwin,4),dtype=np.float32)\n\n # loop through each win again\n for ii in range(nwin):\n\n # causal and acausal lags for both ref and cur waveforms\n pcur = tcur[ii,pwin_indx]\n ncur = tcur[ii,nwin_indx]\n pref = tref[pwin_indx]\n nref = tref[nwin_indx]\n\n # functions working in time domain\n if do_stretch:\n dvv_stretch[ii,0],dvv_stretch[ii,1],cc,cdp = noise_module.stretching(pref,pcur,epsilon,nbtrial,para)\n dvv_stretch[ii,2],dvv_stretch[ii,3],cc,cdp = noise_module.stretching(nref,ncur,epsilon,nbtrial,para)\n if do_dtw:\n dvv_dtw[ii,0],dvv_dtw[ii,1],dist = noise_module.dtw_dvv(pref,pcur,para,mlag,b,direct)\n dvv_dtw[ii,2],dvv_dtw[ii,3],dist = noise_module.dtw_dvv(nref,ncur,para,mlag,b,direct)\n\n # check parameters for mwcs\n if move_win_sec > 0.5*(np.max(twin)-np.min(twin)):\n raise IOError('twin too small for MWCS')\n\n # functions with moving window \n if do_mwcs:\n dvv_mwcs[ii,0],dvv_mwcs[ii,1] = noise_module.mwcs_dvv(pref,pcur,move_win_sec,step_sec,para)\n dvv_mwcs[ii,2],dvv_mwcs[ii,3] = noise_module.mwcs_dvv(nref,ncur,move_win_sec,step_sec,para)\n if do_mwcc:\n dvv_wcc[ii,0],dvv_wcc[ii,1] = noise_module.WCC_dvv(pref,pcur,move_win_sec,step_sec,para)\n dvv_wcc[ii,2],dvv_wcc[ii,3] = noise_module.WCC_dvv(nref,ncur,move_win_sec,step_sec,para)\n\n allfreq = False # average dv/v over the frequency band for wts and wxs\n if do_wts:\n dvv_wts[ii,0],dvv_wts[ii,1] = noise_module.wts_allfreq(pref,pcur,allfreq,para,epsilon,nbtrial,dj,s0,J,wvn)\n dvv_wts[ii,2],dvv_wts[ii,3] = noise_module.wts_allfreq(nref,ncur,allfreq,para,epsilon,nbtrial,dj,s0,J,wvn)\n if do_wxs:\n dvv_wxs[ii,0],dvv_wxs[ii,1] = noise_module.wxs_allfreq(pref,pcur,allfreq,para,dj,s0,J)\n dvv_wxs[ii,2],dvv_wxs[ii,3] = noise_module.wxs_allfreq(nref,ncur,allfreq,para,dj,s0,J)\n\n '''\n allfreq = True # look at all frequency range\n para['freq'] = freq\n\n # functions in wavelet domain to compute dvv for all frequency\n if do_wts:\n 
dfreq,dv_wts1,unc1 = noise_module.wts_allfreq(ref[pwin_indx],cur[pwin_indx],allfreq,para,epsilon,nbtrial,dj,s0,J,wvn)\n dfreq,dv_wts2,unc2 = noise_module.wts_allfreq(ref[nwin_indx],cur[nwin_indx],allfreq,para,epsilon,nbtrial,dj,s0,J,wvn)\n if do_wxs:\n dfreq,dv_wxs1,unc1 = noise_module.wxs_allfreq(ref[pwin_indx],cur[pwin_indx],allfreq,para,dj,s0,J)\n dfreq,dv_wxs2,unc2 = noise_module.wxs_allfreq(ref[nwin_indx],cur[nwin_indx],allfreq,para,dj,s0,J)\n '''\n\n ###############################################\n ############ PLOTTING SECTION #################\n ###############################################\n\n # dv/v at each filtered frequency band\n ax3 = plt.subplot(313)\n legend_mark = []\n if do_stretch:\n ax3.plot(timestamp[:igood],dvv_stretch[:,0],'yo-',markersize=6,linewidth=0.5)\n ax3.plot(timestamp[:igood],dvv_stretch[:,2],'co-',markersize=6,linewidth=0.5)\n legend_mark.append('str+')\n legend_mark.append('str-')\n if do_dtw:\n ax3.plot(timestamp[:igood],dvv_dtw[:,0],'yv-',markersize=6,linewidth=0.5)\n ax3.plot(timestamp[:igood],dvv_dtw[:,2],'cv-',markersize=6,linewidth=0.5)\n legend_mark.append('dtw+')\n legend_mark.append('dtw-')\n if do_mwcs:\n ax3.plot(timestamp[:igood],dvv_mwcs[:,0],'ys-',markersize=6,linewidth=0.5)\n ax3.plot(timestamp[:igood],dvv_mwcs[:,2],'cs-',markersize=6,linewidth=0.5)\n legend_mark.append('mwcs+')\n legend_mark.append('mwcs-')\n if do_mwcc:\n ax3.plot(timestamp[:igood],dvv_wcc[:,0],'y*-',markersize=6,linewidth=0.5)\n ax3.plot(timestamp[:igood],dvv_wcc[:,2],'c*-',markersize=6,linewidth=0.5)\n legend_mark.append('wcc+')\n legend_mark.append('wcc-')\n if do_wts:\n ax3.plot(timestamp[:igood],dvv_wts[:,0],'yx-',markersize=6,linewidth=0.5)\n ax3.plot(timestamp[:igood],dvv_wts[:,2],'cx-',markersize=6,linewidth=0.5)\n legend_mark.append('wts+')\n legend_mark.append('wts-')\n if do_wxs:\n ax3.plot(timestamp[:igood],dvv_wxs[:,0],'yp-',markersize=6,linewidth=0.5)\n ax3.plot(timestamp[:igood],dvv_wxs[:,2],'cp-',markersize=6,linewidth=0.5)\n legend_mark.append('wxs+')\n legend_mark.append('wxs-')\n ax3.legend(legend_mark,loc='upper right')\n #ax3.grid('true')\n ax3.set_ylabel('dv/v [%]')\n\n # save figure or just show\n outfname = outdir+'/{0:s}_{1:4.2f}_{2:4.2f}Hz.pdf'.format(sfile.split('/')[-1],freq1,freq2)\n plt.savefig(outfname, format='pdf', dpi=400)\n plt.close()\n" ]
[ [ "numpy.empty", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "numpy.abs", "pandas.plotting.register_matplotlib_converters", "numpy.arange", "matplotlib.pyplot.subplot", "numpy.max", "numpy.float", "numpy.min", "matplotlib.pyplot.close", "numpy.flip", "numpy.corrcoef" ] ]
kanesp/keras
[ "7f8c62b90274f9c5a261984c098312ff8fab3d66" ]
[ "keras/layers/preprocessing/index_lookup_test.py" ]
[ "# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Keras text vectorization preprocessing layer.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nimport itertools\nimport os\nimport random\nimport string\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nimport keras\nfrom keras import keras_parameterized\nfrom keras import testing_utils\nfrom keras.layers.preprocessing import index_lookup\nfrom keras.layers.preprocessing import index_lookup_v1\nfrom keras.layers.preprocessing import preprocessing_test_utils\nfrom keras.saving import save\nfrom keras.utils.generic_utils import CustomObjectScope\n\n\ndef get_layer_class():\n if tf.executing_eagerly():\n return index_lookup.IndexLookup\n else:\n return index_lookup_v1.IndexLookup\n\n\ndef _get_end_to_end_test_cases():\n test_cases = (\n {\n \"testcase_name\":\n \"test_strings_soft_vocab_cap\",\n # Create an array where 'earth' is the most frequent term, followed by\n # 'wind', then 'and', then 'fire'. This ensures that the vocab\n # accumulator is sorting by frequency.\n \"vocab_data\":\n np.array([[\"fire\"], [\"earth\"], [\"earth\"], [\"earth\"], [\"earth\"],\n [\"wind\"], [\"wind\"], [\"wind\"], [\"and\"], [\"and\"]]),\n \"input_data\":\n np.array([[\"earth\"], [\"wind\"], [\"and\"], [\"fire\"], [\"fire\"],\n [\"and\"], [\"earth\"], [\"michigan\"]]),\n \"kwargs\": {\n \"max_tokens\": None,\n \"num_oov_indices\": 1,\n \"mask_token\": \"\",\n \"oov_token\": \"[OOV]\",\n \"dtype\": tf.string,\n },\n \"expected_output\": [[2], [3], [4], [5], [5], [4], [2], [1]],\n \"input_dtype\":\n tf.string\n },\n {\n \"testcase_name\":\n \"test_inverse_strings_soft_vocab_cap\",\n # Create an array where 'earth' is the most frequent term, followed by\n # 'wind', then 'and', then 'fire'. 
This ensures that the vocab\n # accumulator is sorting by frequency.\n \"vocab_data\":\n np.array([[\"fire\"], [\"earth\"], [\"earth\"], [\"earth\"], [\"earth\"],\n [\"wind\"], [\"wind\"], [\"wind\"], [\"and\"], [\"and\"]]),\n \"input_data\":\n np.array([[2], [3], [4], [1], [1], [4], [2], [5]]),\n \"kwargs\": {\n \"max_tokens\": None,\n \"num_oov_indices\": 1,\n \"mask_token\": \"\",\n \"oov_token\": \"[OOV]\",\n \"dtype\": tf.string,\n \"invert\": True\n },\n \"expected_output\":\n np.array([[b\"earth\"], [b\"wind\"], [b\"and\"], [b\"[OOV]\"], [b\"[OOV]\"],\n [b\"and\"], [b\"earth\"], [b\"fire\"]]),\n \"input_dtype\":\n tf.int64\n },\n {\n \"testcase_name\":\n \"test_strings_with_special_tokens\",\n # Mask and oov values in the vocab data should be dropped, and mapped\n # to 0 and 1 respectively when calling the layer.\n \"vocab_data\":\n np.array([[\"fire\"], [\"earth\"], [\"earth\"], [\"earth\"], [\"earth\"],\n [\"\"], [\"\"], [\"\"], [\"[OOV]\"], [\"[OOV]\"], [\"[OOV]\"],\n [\"wind\"], [\"wind\"], [\"wind\"], [\"and\"], [\"and\"]]),\n \"input_data\":\n np.array([[\"earth\"], [\"\"], [\"wind\"], [\"[OOV]\"], [\"and\"], [\"\"],\n [\"fire\"], [\"and\"], [\"[OOV]\"], [\"michigan\"]]),\n \"kwargs\": {\n \"max_tokens\": None,\n \"num_oov_indices\": 1,\n \"mask_token\": \"\",\n \"oov_token\": \"[OOV]\",\n \"dtype\": tf.string,\n },\n \"expected_output\": [[2], [0], [3], [1], [4], [0], [5], [4], [1], [1]],\n \"input_dtype\":\n tf.string\n },\n {\n \"testcase_name\":\n \"test_ints_soft_vocab_cap\",\n # Create an array where 1138 is the most frequent term, followed by\n # 1729, then 725, then 42. This ensures that the vocab accumulator\n # is sorting by frequency.\n \"vocab_data\":\n np.array([[42], [1138], [1138], [1138], [1138], [1729], [1729],\n [1729], [725], [725]],\n dtype=np.int64),\n \"input_data\":\n np.array([[1138], [1729], [725], [42], [42], [725], [1138], [4]],\n dtype=np.int64),\n \"kwargs\": {\n \"max_tokens\": None,\n \"num_oov_indices\": 1,\n \"mask_token\": 0,\n \"oov_token\": -1,\n \"dtype\": tf.int64,\n },\n \"expected_output\": [[2], [3], [4], [5], [5], [4], [2], [1]],\n \"input_dtype\":\n tf.int64\n },\n {\n \"testcase_name\":\n \"test_ints_with_special_tokens\",\n # Mask and oov values in the vocab data should be dropped, and mapped\n # to 0 and 1 respectively when calling the layer.\n \"vocab_data\":\n np.array([[42], [1138], [1138], [1138], [1138], [0], [0], [0],\n [-1], [-1], [-1], [1729], [1729], [1729], [725], [725]],\n dtype=np.int64),\n \"input_data\":\n np.array([[1138], [0], [1729], [-1], [725], [0], [42], [725],\n [-1], [4]],\n dtype=np.int64),\n \"kwargs\": {\n \"max_tokens\": None,\n \"num_oov_indices\": 1,\n \"mask_token\": 0,\n \"oov_token\": -1,\n \"dtype\": tf.int64,\n },\n \"expected_output\": [[2], [0], [3], [1], [4], [0], [5], [4], [1], [1]],\n \"input_dtype\":\n tf.int64\n },\n {\n \"testcase_name\":\n \"test_strings_hard_vocab_cap\",\n # Create an array where 'earth' is the most frequent term, followed by\n # 'wind', then 'and', then 'fire'. 
This ensures that the vocab\n # accumulator is sorting by frequency.\n \"vocab_data\":\n np.array([[\"fire\"], [\"earth\"], [\"earth\"], [\"earth\"], [\"earth\"],\n [\"wind\"], [\"wind\"], [\"wind\"], [\"and\"], [\"and\"]]),\n \"input_data\":\n np.array([[\"earth\"], [\"wind\"], [\"and\"], [\"fire\"], [\"fire\"],\n [\"and\"], [\"earth\"], [\"michigan\"]]),\n \"kwargs\": {\n \"max_tokens\": 5,\n \"num_oov_indices\": 1,\n \"mask_token\": \"\",\n \"oov_token\": \"[OOV]\",\n \"dtype\": tf.string,\n },\n \"expected_output\": [[2], [3], [4], [1], [1], [4], [2], [1]],\n \"input_dtype\":\n tf.string\n },\n {\n \"testcase_name\":\n \"test_inverse_strings_hard_vocab_cap\",\n # Create an array where 'earth' is the most frequent term, followed by\n # 'wind', then 'and', then 'fire'. This ensures that the vocab\n # accumulator is sorting by frequency.\n \"vocab_data\":\n np.array([[\"fire\"], [\"earth\"], [\"earth\"], [\"earth\"], [\"earth\"],\n [\"wind\"], [\"wind\"], [\"wind\"], [\"and\"], [\"and\"]]),\n \"input_data\":\n np.array([[2], [3], [4], [1], [1], [4], [2], [5]]),\n \"kwargs\": {\n \"max_tokens\": 5,\n \"num_oov_indices\": 1,\n \"mask_token\": \"\",\n \"oov_token\": \"[OOV]\",\n \"dtype\": tf.string,\n \"invert\": True\n },\n \"expected_output\":\n np.array([[b\"earth\"], [b\"wind\"], [b\"and\"], [b\"[OOV]\"], [b\"[OOV]\"],\n [b\"and\"], [b\"earth\"], [b\"[OOV]\"]]),\n \"input_dtype\":\n tf.int64\n },\n {\n \"testcase_name\":\n \"test_ints_hard_vocab_cap\",\n # Create an array where 1138 is the most frequent term, followed by\n # 1729, then 725, then 42. This ensures that the vocab accumulator\n # is sorting by frequency.\n \"vocab_data\":\n np.array([[42], [1138], [1138], [1138], [1138], [1729], [1729],\n [1729], [725], [725]],\n dtype=np.int64),\n \"input_data\":\n np.array([[1138], [1729], [725], [42], [42], [725], [1138], [4]],\n dtype=np.int64),\n \"kwargs\": {\n \"max_tokens\": 5,\n \"num_oov_indices\": 1,\n \"mask_token\": 0,\n \"oov_token\": -1,\n \"dtype\": tf.int64,\n },\n \"expected_output\": [[2], [3], [4], [1], [1], [4], [2], [1]],\n \"input_dtype\":\n tf.int64\n },\n {\n \"testcase_name\":\n \"test_ints_tf_idf_output\",\n \"vocab_data\":\n np.array([[42], [1138], [1138], [1138], [1138], [1729], [1729],\n [1729], [725], [725]]),\n \"input_data\":\n np.array([[1138], [1729], [725], [42], [42], [725], [1138], [4]]),\n \"kwargs\": {\n \"max_tokens\": 5,\n \"num_oov_indices\": 1,\n \"mask_token\": 0,\n \"oov_token\": -1,\n \"output_mode\": index_lookup.TFIDF,\n \"dtype\": tf.int64,\n },\n \"expected_output\": [[0, 1.098612, 0, 0, 0], [0, 0, 1.252763, 0, 0],\n [0, 0, 0, 1.466337, 0], [0, 0, 0, 0, 1.7917595],\n [0, 0, 0, 0, 1.7917595], [0, 0, 0, 1.4663371, 0],\n [0, 1.098612, 0, 0, 0], [1.402368, 0, 0, 0, 0]],\n \"input_dtype\":\n tf.int64\n },\n {\n \"testcase_name\":\n \"test_strings_tf_idf_output\",\n \"vocab_data\":\n np.array([[\"fire\"], [\"earth\"], [\"earth\"], [\"earth\"], [\"earth\"],\n [\"wind\"], [\"wind\"], [\"wind\"], [\"and\"], [\"and\"]]),\n \"input_data\":\n np.array([[\"earth\"], [\"wind\"], [\"and\"], [\"fire\"], [\"fire\"],\n [\"and\"], [\"earth\"], [\"michigan\"]]),\n \"kwargs\": {\n \"max_tokens\": 5,\n \"num_oov_indices\": 1,\n \"mask_token\": \"\",\n \"oov_token\": \"[OOV]\",\n \"output_mode\": index_lookup.TFIDF,\n \"dtype\": tf.string,\n },\n \"expected_output\": [[0, 1.098612, 0, 0, 0], [0, 0, 1.252763, 0, 0],\n [0, 0, 0, 1.466337, 0], [0, 0, 0, 0, 1.7917595],\n [0, 0, 0, 0, 1.7917595], [0, 0, 0, 1.4663371, 0],\n [0, 1.098612, 0, 0, 0], [1.402368, 0, 0, 0, 
0]],\n \"input_dtype\":\n tf.string\n },\n )\n\n crossed_test_cases = []\n # Cross above test cases with use_dataset in (True, False)\n for use_dataset in (True, False):\n for case in test_cases:\n case = case.copy()\n if use_dataset:\n case[\"testcase_name\"] = case[\"testcase_name\"] + \"_with_dataset\"\n case[\"use_dataset\"] = use_dataset\n crossed_test_cases.append(case)\n\n return crossed_test_cases\n\n\n@keras_parameterized.run_all_keras_modes\nclass IndexLookupLayerTest(keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n @parameterized.named_parameters(*_get_end_to_end_test_cases())\n def test_layer_end_to_end_with_adapt(self, vocab_data, input_data, kwargs,\n use_dataset, expected_output,\n input_dtype):\n cls = get_layer_class()\n if \"invert\" in kwargs and kwargs[\"invert\"]:\n expected_output_dtype = kwargs[\"dtype\"]\n elif \"output_mode\" in kwargs and kwargs[\"output_mode\"] != index_lookup.INT:\n expected_output_dtype = tf.float32\n else:\n expected_output_dtype = tf.int64\n\n input_shape = input_data.shape\n\n if use_dataset:\n # Keras APIs expect batched datasets.\n # TODO(rachelim): `model.predict` predicts the result on each\n # dataset batch separately, then tries to concatenate the results\n # together. When the results have different shapes on the non-concat\n # axis (which can happen in the output_mode = INT case for\n # IndexLookup), the concatenation fails. In real use cases, this may\n # not be an issue because users are likely to pipe the preprocessing layer\n # into other keras layers instead of predicting it directly. A workaround\n # for these unit tests is to have the dataset only contain one batch, so\n # no concatenation needs to happen with the result. For consistency with\n # numpy input, we should make `predict` join differently shaped results\n # together sensibly, with 0 padding.\n input_data = tf.data.Dataset.from_tensor_slices(input_data).batch(\n input_shape[0])\n vocab_data = tf.data.Dataset.from_tensor_slices(vocab_data).batch(\n input_shape[0])\n\n with CustomObjectScope({\"IndexLookup\": cls}):\n output_data = testing_utils.layer_test(\n cls,\n kwargs=kwargs,\n input_shape=input_shape,\n input_data=input_data,\n input_dtype=input_dtype,\n expected_output_dtype=expected_output_dtype,\n validate_training=False,\n adapt_data=vocab_data)\n if \"invert\" in kwargs and kwargs[\"invert\"]:\n self.assertAllEqual(expected_output, output_data)\n else:\n self.assertAllClose(expected_output, output_data)\n\n\n@keras_parameterized.run_all_keras_modes\nclass CategoricalEncodingInputTest(\n keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def test_sparse_string_input(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = tf.SparseTensor(\n indices=[[0, 0], [1, 2]],\n values=[\"fire\", \"michigan\"],\n dense_shape=[3, 4])\n\n expected_indices = [[0, 0], [1, 2]]\n expected_values = [5, 1]\n expected_dense_shape = [3, 4]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string, sparse=True)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_data = model.predict(input_array, steps=1)\n self.assertAllEqual(expected_indices, output_data.indices)\n self.assertAllEqual(expected_values, output_data.values)\n self.assertAllEqual(expected_dense_shape, 
output_data.dense_shape)\n\n def test_sparse_int_input(self):\n vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)\n input_array = tf.SparseTensor(\n indices=[[0, 0], [1, 2]],\n values=np.array([13, 32], dtype=np.int64),\n dense_shape=[3, 4])\n\n expected_indices = [[0, 0], [1, 2]]\n expected_values = [5, 1]\n expected_dense_shape = [3, 4]\n\n input_data = keras.Input(shape=(None,), dtype=tf.int64, sparse=True)\n layer = get_layer_class()(\n max_tokens=None,\n dtype=tf.int64,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_data = model.predict(input_array, steps=1)\n self.assertAllEqual(expected_indices, output_data.indices)\n self.assertAllEqual(expected_values, output_data.values)\n self.assertAllEqual(expected_dense_shape, output_data.dense_shape)\n\n def test_ragged_string_input(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = tf.ragged.constant(\n [[\"earth\", \"wind\", \"fire\"], [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string, ragged=True)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_ragged_int_input(self):\n vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)\n input_array = tf.ragged.constant([[10, 11, 13], [13, 12, 10, 42]],\n dtype=np.int64)\n expected_output = [[2, 3, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.int64, ragged=True)\n layer = get_layer_class()(\n max_tokens=None,\n dtype=tf.int64,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_int32_input_with_int64_keys(self):\n vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)\n input_array = tf.ragged.constant([[10, 11, 13], [13, 12, 10, 42]],\n dtype=np.int32)\n expected_output = [[2, 3, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.int32, ragged=True)\n layer = get_layer_class()(\n max_tokens=None,\n dtype=tf.int64,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n\n@keras_parameterized.run_all_keras_modes\nclass CategoricalEncodingMultiOOVTest(\n keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def test_sparse_string_input_multi_bucket(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = tf.SparseTensor(\n indices=[[0, 0], [1, 2]],\n values=[\"fire\", \"ohio\"],\n dense_shape=[3, 4])\n\n expected_indices = [[0, 0], [1, 2]]\n expected_values = [6, 2]\n expected_dense_shape = [3, 4]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string, sparse=True)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=2,\n mask_token=\"\",\n 
oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_data = model.predict(input_array, steps=1)\n self.assertAllEqual(expected_indices, output_data.indices)\n self.assertAllEqual(expected_values, output_data.values)\n self.assertAllEqual(expected_dense_shape, output_data.dense_shape)\n\n def test_sparse_int_input_multi_bucket(self):\n vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)\n input_array = tf.SparseTensor(\n indices=[[0, 0], [1, 2]],\n values=np.array([13, 133], dtype=np.int64),\n dense_shape=[3, 4])\n\n expected_indices = [[0, 0], [1, 2]]\n expected_values = [6, 2]\n expected_dense_shape = [3, 4]\n\n input_data = keras.Input(shape=(None,), dtype=tf.int64, sparse=True)\n layer = get_layer_class()(\n max_tokens=None,\n dtype=tf.int64,\n num_oov_indices=2,\n mask_token=0,\n oov_token=-1)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_data = model.predict(input_array, steps=1)\n self.assertAllEqual(expected_indices, output_data.indices)\n self.assertAllEqual(expected_values, output_data.values)\n self.assertAllEqual(expected_dense_shape, output_data.dense_shape)\n\n def test_ragged_string_input_multi_bucket(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = tf.ragged.constant(\n [[\"earth\", \"wind\", \"fire\"], [\"fire\", \"and\", \"earth\", \"ohio\"]])\n expected_output = [[3, 4, 6], [6, 5, 3, 2]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string, ragged=True)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=2,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_ragged_int_input_multi_bucket(self):\n vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)\n input_array = tf.ragged.constant([[10, 11, 13], [13, 12, 10, 133]],\n dtype=np.int64)\n expected_output = [[3, 4, 6], [6, 5, 3, 2]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.int64, ragged=True)\n layer = get_layer_class()(\n max_tokens=None,\n dtype=tf.int64,\n num_oov_indices=2,\n mask_token=0,\n oov_token=-1)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n\n@keras_parameterized.run_all_keras_modes\nclass CategoricalEncodingAdaptTest(\n keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def test_sparse_adapt(self):\n vocab_data = tf.SparseTensor(\n indices=[[0, 0], [0, 1], [1, 2]],\n values=[\"michigan\", \"fire\", \"michigan\"],\n dense_shape=[3, 4])\n vocab_dataset = tf.data.Dataset.from_tensors(vocab_data)\n\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.adapt(vocab_dataset)\n expected_vocabulary = [\"\", \"[OOV]\", \"michigan\", \"fire\"]\n self.assertAllEqual(expected_vocabulary, layer.get_vocabulary())\n\n def test_ragged_adapt(self):\n vocab_data = tf.ragged.constant([[\"michigan\"],\n [\"fire\", \"michigan\"]])\n vocab_dataset = tf.data.Dataset.from_tensors(vocab_data)\n\n layer = 
get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.adapt(vocab_dataset)\n expected_vocabulary = [\"\", \"[OOV]\", \"michigan\", \"fire\"]\n self.assertAllEqual(expected_vocabulary, layer.get_vocabulary())\n\n def test_sparse_int_input(self):\n vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)\n input_array = tf.SparseTensor(\n indices=[[0, 0], [1, 2]],\n values=np.array([13, 32], dtype=np.int64),\n dense_shape=[3, 4])\n\n expected_indices = [[0, 0], [1, 2]]\n expected_values = [5, 1]\n expected_dense_shape = [3, 4]\n\n input_data = keras.Input(shape=(None,), dtype=tf.int64, sparse=True)\n layer = get_layer_class()(\n max_tokens=None,\n dtype=tf.int64,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_data = model.predict(input_array, steps=1)\n self.assertAllEqual(expected_indices, output_data.indices)\n self.assertAllEqual(expected_values, output_data.values)\n self.assertAllEqual(expected_dense_shape, output_data.dense_shape)\n\n def test_ragged_string_input(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = tf.ragged.constant(\n [[\"earth\", \"wind\", \"fire\"], [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string, ragged=True)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_ragged_int_input(self):\n vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)\n input_array = tf.ragged.constant([[10, 11, 13], [13, 12, 10, 42]],\n dtype=np.int64)\n expected_output = [[2, 3, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.int64, ragged=True)\n layer = get_layer_class()(\n max_tokens=None,\n dtype=tf.int64,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_single_string_generator_dataset(self):\n\n def word_gen():\n for _ in itertools.count(1):\n yield \"\".join(random.choice(string.ascii_letters) for i in range(2))\n\n ds = tf.data.Dataset.from_generator(word_gen, tf.string,\n tf.TensorShape([]))\n batched_ds = ds.take(2)\n input_t = keras.Input(shape=(), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=10,\n num_oov_indices=0,\n mask_token=None,\n oov_token=None,\n dtype=tf.string)\n _ = layer(input_t)\n layer.adapt(batched_ds)\n\n\n@keras_parameterized.run_all_keras_modes\nclass IndexLookupOutputTest(keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def _write_to_temp_file(self, file_name, vocab_list):\n vocab_path = os.path.join(self.get_temp_dir(), file_name + \".txt\")\n with tf.io.gfile.GFile(vocab_path, \"w\") as writer:\n for vocab in vocab_list:\n writer.write(vocab + \"\\n\")\n writer.flush()\n writer.close()\n return vocab_path\n\n def test_int_output(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n 
input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_int_output_shape(self):\n input_data = keras.Input(batch_size=16, shape=(4,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=2,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n int_data = layer(input_data)\n self.assertAllEqual(int_data.shape.as_list(), [16, 4])\n\n def test_int_output_no_reserved_zero(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[1, 2, 3, 4], [4, 3, 1, 0]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=None,\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_int_output_no_oov(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"ohio\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[1, 2, 3, -1], [4, 3, 1, -1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=0,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_int_output_explicit_vocab(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n vocabulary=vocab_data,\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_binary_output_hard_maximum(self):\n \"\"\"Check binary output when pad_to_max_tokens=True.\"\"\"\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\", \"\"],\n [\"fire\", \"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [\n [0, 1, 1, 1, 1, 0],\n [1, 1, 0, 1, 1, 0],\n ]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=6,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.BINARY,\n pad_to_max_tokens=True,\n 
dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n binary_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=binary_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_binary_output_no_oov(self):\n \"\"\"Check binary output when pad_to_max_tokens=True.\"\"\"\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\", \"ohio\"],\n [\"fire\", \"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [\n [1, 1, 1, 1, 0],\n [1, 0, 1, 1, 0],\n ]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=5,\n num_oov_indices=0,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.BINARY,\n pad_to_max_tokens=True,\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n binary_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=binary_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_binary_output_hard_maximum_multiple_adapts(self):\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"earth\"],\n [\"ohio\", \"and\", \"earth\", \"michigan\"]])\n adapt_data = [\"earth\", \"earth\", \"earth\", \"earth\", \"wind\", \"wind\", \"wind\"]\n first_expected_output = [\n [1, 1, 1, 0, 0],\n [1, 1, 0, 0, 0],\n ]\n second_adapt_data = [\n \"earth\", \"earth\", \"earth\", \"earth\", \"wind\", \"wind\", \"wind\", \"and\",\n \"and\", \"fire\"\n ]\n second_expected_output = [\n [0, 1, 1, 1, 0],\n [1, 1, 0, 1, 0],\n ]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=5,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.BINARY,\n pad_to_max_tokens=True,\n dtype=tf.string)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n\n # Test the first adapt\n layer.adapt(adapt_data)\n first_output = model.predict(input_array)\n # Test the second adapt\n layer.adapt(second_adapt_data)\n second_output = model.predict(input_array)\n self.assertAllEqual(first_expected_output, first_output)\n self.assertAllEqual(second_expected_output, second_output)\n\n def test_binary_output_soft_maximum(self):\n \"\"\"Check binary output when pad_to_max_tokens=False.\"\"\"\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\", \"\"],\n [\"fire\", \"and\", \"earth\", \"michigan\", \"\"]])\n expected_output = [\n [0, 1, 1, 1, 1],\n [1, 1, 0, 1, 1],\n ]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.BINARY,\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n binary_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=binary_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_binary_output_shape(self):\n input_data = keras.Input(batch_size=16, shape=(4,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=2,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.BINARY,\n dtype=tf.string)\n binary_data = layer(input_data)\n self.assertAllEqual(binary_data.shape.as_list(), [16, 2])\n\n def test_count_output_hard_maxiumum(self):\n \"\"\"Check count output 
when pad_to_max_tokens=True.\"\"\"\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"wind\", \"\"],\n [\"fire\", \"fire\", \"fire\", \"michigan\", \"\"]])\n expected_output = [\n [0, 1, 2, 1, 0, 0],\n [1, 0, 0, 0, 3, 0],\n ]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=6,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.COUNT,\n pad_to_max_tokens=True,\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n count_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=count_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_count_output_soft_maximum(self):\n \"\"\"Check count output when pad_to_max_tokens=False.\"\"\"\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"wind\", \"\"],\n [\"fire\", \"fire\", \"fire\", \"michigan\", \"\"]])\n expected_output = [\n [0, 1, 2, 1, 0],\n [1, 0, 0, 0, 3],\n ]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.COUNT,\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n count_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=count_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_count_output_shape(self):\n input_data = keras.Input(batch_size=16, shape=(4,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=2,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.COUNT,\n dtype=tf.string)\n count_data = layer(input_data)\n self.assertAllEqual(count_data.shape.as_list(), [16, 2])\n\n def test_ifidf_output_hard_maximum(self):\n \"\"\"Check tf-idf output when pad_to_max_tokens=True.\"\"\"\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n # OOV idf weight (bucket 0) should be 0.5, the average of passed weights.\n idf_weights = [.4, .25, .75, .6]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"earth\", \"\"],\n [\"ohio\", \"fire\", \"earth\", \"michigan\", \"\"]])\n expected_output = [\n [0.00, 0.80, 0.25, 0.75, 0.00, 0.00],\n [1.00, 0.40, 0.00, 0.00, 0.60, 0.00],\n ]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=6,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.TFIDF,\n pad_to_max_tokens=True,\n dtype=tf.string)\n layer.set_vocabulary(vocab_data, idf_weights=idf_weights)\n layer_output = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=layer_output)\n output_dataset = model.predict(input_array)\n self.assertAllClose(expected_output, output_dataset)\n\n def test_ifidf_output_soft_maximum(self):\n \"\"\"Check tf-idf output when pad_to_max_tokens=False.\"\"\"\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n # OOV idf weight (bucket 0) should be 0.5, the average of passed weights.\n idf_weights = [.4, .25, .75, .6]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"earth\", \"\"],\n [\"ohio\", \"fire\", \"earth\", \"michigan\", \"\"]])\n expected_output = [\n [0.00, 0.80, 0.25, 0.75, 0.00],\n [1.00, 0.40, 0.00, 0.00, 0.60],\n ]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n 
max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.TFIDF,\n dtype=tf.string)\n layer.set_vocabulary(vocab_data, idf_weights=idf_weights)\n layer_output = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=layer_output)\n output_dataset = model.predict(input_array)\n self.assertAllClose(expected_output, output_dataset)\n\n def test_ifidf_output_shape(self):\n input_data = keras.Input(batch_size=16, shape=(4,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=2,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.COUNT,\n dtype=tf.string)\n layer_output = layer(input_data)\n self.assertAllEqual(layer_output.shape.as_list(), [16, 2])\n\n def test_int_output_file_vocab(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 4, 5], [5, 0, 2, 1]]\n\n vocab_file = self._write_to_temp_file(\"temp\", vocab_data)\n vocabulary_initializer = tf.lookup.TextFileInitializer(\n filename=vocab_file,\n key_dtype=tf.string,\n key_index=tf.lookup.TextFileIndex.WHOLE_LINE,\n value_dtype=tf.int64,\n value_index=tf.lookup.TextFileIndex.LINE_NUMBER,\n value_index_offset=2)\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n vocabulary=vocabulary_initializer,\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_int_output_int_file_vocab(self):\n vocab_data = [\"10\", \"20\", \"30\", \"40\"]\n input_array = np.array([[10, 20, 30, 40], [40, 0, 10, 42]])\n expected_output = [[2, 3, 4, 5], [5, 0, 2, 1]]\n\n vocab_file = self._write_to_temp_file(\"temp\", vocab_data)\n vocabulary_initializer = tf.lookup.TextFileInitializer(\n filename=vocab_file,\n key_dtype=tf.int64,\n key_index=tf.lookup.TextFileIndex.WHOLE_LINE,\n value_dtype=tf.int64,\n value_index=tf.lookup.TextFileIndex.LINE_NUMBER,\n value_index_offset=2)\n\n input_data = keras.Input(shape=(None,), dtype=tf.int64)\n layer = get_layer_class()(\n vocabulary=vocabulary_initializer,\n max_tokens=None,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n\n@keras_parameterized.run_all_keras_modes\nclass IndexLookupVocabularyTest(keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest\n ):\n\n def test_int_output_explicit_vocab(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n vocabulary=vocab_data,\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def 
test_int_output_explicit_vocab_with_special_tokens(self):\n vocab_data = [\"\", \"[OOV]\", \"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n vocabulary=vocab_data,\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_vocab_with_max_cap(self):\n vocab_data = [\"\", \"[OOV]\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=5,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n returned_vocab = layer.get_vocabulary()\n self.assertAllEqual(vocab_data, returned_vocab)\n self.assertAllEqual(layer.vocab_size(), 5)\n\n def test_int_vocab_with_max_cap(self):\n vocab_data = [0, -1, 42, 1276, 1138]\n layer = get_layer_class()(\n max_tokens=5,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64)\n layer.set_vocabulary(vocab_data)\n returned_vocab = layer.get_vocabulary()\n self.assertAllEqual(vocab_data, returned_vocab)\n self.assertAllEqual(layer.vocab_size(), 5)\n\n def test_vocab_with_multiple_oov_indices(self):\n vocab_data = [\"\", \"[OOV]\", \"[OOV]\", \"[OOV]\", \"wind\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=3,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n returned_vocab = layer.get_vocabulary()\n self.assertAllEqual(vocab_data, returned_vocab)\n\n def test_int_vocab_with_multiple_oov_indices(self):\n vocab_data = [0, -1, -1, -1, 42]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=3,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64)\n layer.set_vocabulary(vocab_data)\n returned_vocab = layer.get_vocabulary()\n self.assertAllEqual(vocab_data, returned_vocab)\n\n def test_non_unique_vocab_fails(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\", \"fire\"]\n with self.assertRaisesRegex(ValueError, \".*repeated term.*fire.*\"):\n _ = get_layer_class()(\n vocabulary=vocab_data,\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n\n def test_vocab_with_oov_and_wrong_mask_fails(self):\n vocab_data = [\"custom_mask\", \"[OOV]\", \"earth\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n with self.assertRaisesRegex(ValueError, \".*does not have the mask token.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_vocab_with_oov_and_no_mask_fails(self):\n vocab_data = [\"[OOV]\", \"earth\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n with self.assertRaisesRegex(ValueError, \".*Reserved OOV.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_vocab_with_mask_but_no_oov_fails(self):\n vocab_data = [\"\", \"earth\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n with self.assertRaisesRegex(ValueError, \".*does not 
have the OOV token.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_vocab_with_repeated_element_fails(self):\n vocab_data = [\"earth\", \"earth\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n with self.assertRaisesRegex(ValueError, \".*repeated term.*earth.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_vocab_with_reserved_oov_element_fails(self):\n vocab_data = [\"earth\", \"test\", \"[OOV]\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n with self.assertRaisesRegex(ValueError, \".*Reserved OOV.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_vocab_with_reserved_mask_element_fails(self):\n vocab_data = [\"earth\", \"mask_token\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"mask_token\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n with self.assertRaisesRegex(ValueError, \".*Reserved mask.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_vocab_set_after_call_pad_to_max_false_fails(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n pad_to_max_tokens=False,\n output_mode=index_lookup.BINARY,\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n # Calling the layer should lock the vocabulary.\n _ = layer([[\"earth\"]])\n with self.assertRaisesRegex(RuntimeError, \"vocabulary cannot be changed\"):\n layer.set_vocabulary(vocab_data)\n\n def test_vocab_with_idf_weights_non_tfidf_output_fails(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n weight_data = [1, 1, 1, 1, 1]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.BINARY,\n dtype=tf.string)\n with self.assertRaisesRegex(ValueError,\n \"`idf_weights` should only be set if\"):\n layer.set_vocabulary(vocab_data, idf_weights=weight_data)\n\n def test_vocab_with_idf_weights_length_mismatch_fails(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n weight_data = [1, 1, 1, 1, 1] # too long\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.TFIDF,\n dtype=tf.string)\n with self.assertRaisesRegex(\n ValueError, \"`idf_weights` must be the same length as vocab\"):\n layer.set_vocabulary(vocab_data, idf_weights=weight_data)\n\n def test_vocab_without_idf_weights_tfidf_output_fails(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.TFIDF,\n dtype=tf.string)\n with self.assertRaisesRegex(\n ValueError, \"`idf_weights` must be set if output_mode is TFIDF\"):\n layer.set_vocabulary(vocab_data)\n\n def test_non_unique_int_vocab_fails(self):\n vocab_data = [12, 13, 14, 15, 15]\n with self.assertRaisesRegex(ValueError, \"repeated term.*15\"):\n _ = get_layer_class()(\n vocabulary=vocab_data,\n max_tokens=None,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64)\n\n def test_int_vocab_with_oov_and_wrong_mask_fails(self):\n vocab_data = [1234, -1, 11, 21, 13, 14]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=0,\n 
oov_token=-1,\n dtype=tf.int64)\n with self.assertRaisesRegex(ValueError, \"does not have the mask token `0`\"):\n layer.set_vocabulary(vocab_data)\n\n def test_int_vocab_with_oov_and_no_mask_fails(self):\n vocab_data = [-1, 11, 12, 13, 14]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64)\n with self.assertRaisesRegex(ValueError, \"Reserved OOV\"):\n layer.set_vocabulary(vocab_data)\n\n def test_int_vocab_with_mask_but_no_oov_fails(self):\n vocab_data = [0, 11, 12, 13, 14]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64)\n with self.assertRaisesRegex(ValueError, \"does not have the OOV token `-1`\"):\n layer.set_vocabulary(vocab_data)\n\n def test_int_vocab_with_repeated_element_fails(self):\n vocab_data = [11, 11, 34, 23, 124]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64)\n with self.assertRaisesRegex(ValueError, \"repeated term.*11\"):\n layer.set_vocabulary(vocab_data)\n\n def test_int_vocab_with_reserved_oov_element_fails(self):\n vocab_data = [14, 38, -1, 34, 3, 84]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64)\n with self.assertRaisesRegex(ValueError, \"Reserved OOV\"):\n layer.set_vocabulary(vocab_data)\n\n def test_int_vocab_with_reserved_mask_element_fails(self):\n vocab_data = [125, 0, 3, 4, 94]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64)\n with self.assertRaisesRegex(ValueError, \"Reserved mask\"):\n layer.set_vocabulary(vocab_data)\n\n\n@keras_parameterized.run_all_keras_modes\nclass IndexLookupInverseVocabularyTest(\n keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def test_int_output_explicit_vocab(self):\n vocab_data = [\"\", \"[OOV]\", \"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[2, 3, 4, 5], [5, 4, 2, 1]])\n expected_output = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"[OOV]\"]])\n\n input_data = keras.Input(shape=(None,), dtype=tf.int64)\n layer = get_layer_class()(\n vocabulary=vocab_data,\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string,\n invert=True)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_vocab_with_max_cap(self):\n vocab_data = [\"\", \"[OOV]\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=5,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string,\n invert=True)\n layer.set_vocabulary(vocab_data)\n returned_vocab = layer.get_vocabulary()\n self.assertAllEqual(vocab_data, returned_vocab)\n\n def test_int_vocab_with_max_cap(self):\n vocab_data = [0, -1, 42, 1276, 1138]\n layer = get_layer_class()(\n max_tokens=5,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64,\n invert=True)\n layer.set_vocabulary(vocab_data)\n returned_vocab = layer.get_vocabulary()\n self.assertAllEqual(vocab_data, returned_vocab)\n\n def test_non_unique_vocab_fails(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\", \"fire\"]\n with self.assertRaisesRegex(ValueError, \".*repeated term.*fire.*\"):\n _ = get_layer_class()(\n vocabulary=vocab_data,\n 
max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string,\n invert=True)\n\n def test_non_int_output_fails(self):\n with self.assertRaisesRegex(ValueError, \"`output_mode` must be int\"):\n _ = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string,\n output_mode=index_lookup.COUNT,\n invert=True)\n\n def test_vocab_with_repeated_element_fails(self):\n vocab_data = [\"earth\", \"earth\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string,\n invert=True)\n with self.assertRaisesRegex(ValueError, \".*repeated term.*earth.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_vocab_with_reserved_mask_element_fails(self):\n vocab_data = [\"earth\", \"mask_token\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"mask_token\",\n oov_token=\"[OOV]\",\n dtype=tf.string,\n invert=True)\n with self.assertRaisesRegex(ValueError, \".*Reserved mask.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_non_unique_int_vocab_fails(self):\n vocab_data = [12, 13, 14, 15, 15]\n with self.assertRaisesRegex(ValueError, \".*repeated term.*15.*\"):\n _ = get_layer_class()(\n vocabulary=vocab_data,\n max_tokens=None,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64,\n invert=True)\n\n def test_int_vocab_with_repeated_element_fails(self):\n vocab_data = [11, 11, 34, 23, 124]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64,\n invert=True)\n with self.assertRaisesRegex(ValueError, \".*repeated term.*11.*\"):\n layer.set_vocabulary(vocab_data)\n\n\n@keras_parameterized.run_all_keras_modes(always_skip_eager=True)\nclass IndexLookupSaveableTest(keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def test_ops_are_not_added_with_multiple_get_set_weights(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=10,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n weights = model.get_weights()\n model.set_weights(weights)\n keras.backend.get_session().graph.finalize()\n weights = model.get_weights()\n model.set_weights(weights)\n\n def test_layer_saving_with_h5(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=10,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n path = os.path.join(self.get_temp_dir(), \"model\")\n with self.assertRaisesRegex(NotImplementedError,\n \"Save or restore weights that is not.*\"):\n save.save_model(model, path, save_format=\"h5\")\n\n\n@keras_parameterized.run_all_keras_modes\nclass IndexLookupErrorTest(keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def test_too_long_vocab_fails_in_single_setting(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n\n layer = get_layer_class()(\n max_tokens=4,\n num_oov_indices=1,\n 
mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n with self.assertRaisesRegex(ValueError,\n \"vocabulary larger than the maximum vocab.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_zero_max_tokens_fails(self):\n with self.assertRaisesRegex(ValueError, \".*max_tokens.*\"):\n _ = get_layer_class()(\n max_tokens=0,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n\n\n@keras_parameterized.run_all_keras_modes\nclass IndexLookupSavingTest(keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def test_vocabulary_persistence_across_saving(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]\n\n # Build and validate a golden model.\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(output_dataset, expected_output)\n\n # Save the model to disk.\n output_path = os.path.join(self.get_temp_dir(), \"tf_keras_saved_model\")\n model.save(output_path, save_format=\"tf\")\n\n # Delete the session and graph to ensure that the loaded model is generated\n # from scratch.\n # TODO(b/149526183): Can't clear session when TF2 is disabled.\n if tf.__internal__.tf2.enabled():\n keras.backend.clear_session()\n\n loaded_model = keras.models.load_model(\n output_path, custom_objects={\"IndexLookup\": get_layer_class()})\n\n # Ensure that the loaded model is unique (so that the save/load is real)\n self.assertIsNot(model, loaded_model)\n\n # Validate correctness of the new model.\n new_output_dataset = loaded_model.predict(input_array)\n self.assertAllEqual(new_output_dataset, expected_output)\n\n\n@keras_parameterized.run_all_keras_modes\nclass IndexLookupStringCombinerTest(\n keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def compare_text_accumulators(self, a, b, msg=None):\n if a is None or b is None:\n self.assertAllEqual(a, b, msg=msg)\n\n self.assertAllEqual(a.count_dict, b.count_dict, msg=msg)\n\n compare_accumulators = compare_text_accumulators\n\n def update_accumulator(self, accumulator, data):\n accumulator.count_dict.update(dict(zip(data[\"vocab\"], data[\"counts\"])))\n\n return accumulator\n\n def test_combiner_api_compatibility_int_mode(self):\n data = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"earth\", \"wind\", \"and\", \"michigan\"]])\n combiner = index_lookup._IndexLookupCombiner()\n expected_accumulator_output = {\n \"vocab\": np.array([\"and\", \"earth\", \"wind\", \"fire\", \"michigan\"]),\n \"counts\": np.array([2, 2, 2, 1, 1]),\n }\n expected_extract_output = {\n \"vocab\": np.array([\"wind\", \"earth\", \"and\", \"michigan\", \"fire\"]),\n \"idf_weights\": None,\n }\n expected_accumulator = combiner._create_accumulator()\n expected_accumulator = self.update_accumulator(expected_accumulator,\n expected_accumulator_output)\n self.validate_accumulator_serialize_and_deserialize(combiner, data,\n expected_accumulator)\n self.validate_accumulator_uniqueness(combiner, data)\n self.validate_accumulator_extract(combiner, data, expected_extract_output)\n\n # 
TODO(askerryryan): Add tests confirming equivalence to behavior of\n # existing tf.keras.preprocessing.text.Tokenizer.\n @parameterized.named_parameters(\n {\n \"testcase_name\":\n \"top_k_smaller_than_full_vocab\",\n \"data\":\n np.array([[\"earth\", \"wind\"], [\"fire\", \"wind\"], [\"and\"],\n [\"fire\", \"wind\"]]),\n \"vocab_size\":\n 3,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([\"wind\", \"fire\", \"earth\", \"and\"]),\n \"counts\": np.array([3, 2, 1, 1]),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([\"wind\", \"fire\", \"earth\"]),\n \"idf_weights\": None,\n },\n },\n {\n \"testcase_name\":\n \"top_k_larger_than_full_vocab\",\n \"data\":\n np.array([[\"earth\", \"wind\"], [\"fire\", \"wind\"], [\"and\"],\n [\"fire\", \"wind\"]]),\n \"vocab_size\":\n 10,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([\"wind\", \"fire\", \"earth\", \"and\"]),\n \"counts\": np.array([3, 2, 1, 1]),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([\"wind\", \"fire\", \"earth\", \"and\"]),\n \"idf_weights\": None,\n },\n },\n {\n \"testcase_name\":\n \"no_top_k\",\n \"data\":\n np.array([[\"earth\", \"wind\"], [\"fire\", \"wind\"], [\"and\"],\n [\"fire\", \"wind\"]]),\n \"vocab_size\":\n None,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([\"wind\", \"fire\", \"earth\", \"and\"]),\n \"counts\": np.array([3, 2, 1, 1]),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([\"wind\", \"fire\", \"earth\", \"and\"]),\n \"idf_weights\": None,\n },\n },\n {\n \"testcase_name\": \"single_element_per_row\",\n \"data\": np.array([[\"earth\"], [\"wind\"], [\"fire\"], [\"wind\"], [\"and\"]]),\n \"vocab_size\": 3,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([\"wind\", \"and\", \"earth\", \"fire\"]),\n \"counts\": np.array([2, 1, 1, 1]),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([\"wind\", \"fire\", \"earth\"]),\n \"idf_weights\": None,\n },\n },\n # Which tokens are retained are based on global frequency, and thus are\n # sensitive to frequency within a document. 
In contrast, because idf only\n # considers the presence of a token in a document, it is insensitive\n # to the frequency of the token within the document.\n {\n \"testcase_name\":\n \"retained_tokens_sensitive_to_within_document_frequency\",\n \"data\":\n np.array([[\"earth\", \"earth\"], [\"wind\", \"wind\"], [\"fire\", \"fire\"],\n [\"wind\", \"wind\"], [\"and\", \"michigan\"]]),\n \"vocab_size\":\n 3,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([\"wind\", \"earth\", \"fire\", \"and\", \"michigan\"]),\n \"counts\": np.array([4, 2, 2, 1, 1]),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([\"wind\", \"fire\", \"earth\"]),\n \"idf_weights\": None,\n },\n })\n def test_combiner_computation(self, data, vocab_size,\n expected_accumulator_output,\n expected_extract_output):\n combiner = index_lookup._IndexLookupCombiner(vocab_size=vocab_size)\n expected_accumulator = combiner._create_accumulator()\n expected_accumulator = self.update_accumulator(expected_accumulator,\n expected_accumulator_output)\n self.validate_accumulator_computation(combiner, data, expected_accumulator)\n self.validate_accumulator_extract(combiner, data, expected_extract_output)\n\n\n@keras_parameterized.run_all_keras_modes\nclass IndexLookupIntCombinerTest(keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest\n ):\n\n def compare_text_accumulators(self, a, b, msg=None):\n if a is None or b is None:\n self.assertAllEqual(a, b, msg=msg)\n\n self.assertAllEqual(a.count_dict, b.count_dict, msg=msg)\n\n compare_accumulators = compare_text_accumulators\n\n def update_accumulator(self, accumulator, data):\n accumulator.count_dict.update(dict(zip(data[\"vocab\"], data[\"counts\"])))\n\n return accumulator\n\n def test_combiner_api_compatibility_int_mode(self):\n data = np.array([[42, 1138, 725, 1729], [42, 1138, 725, 203]])\n combiner = index_lookup._IndexLookupCombiner()\n expected_accumulator_output = {\n \"vocab\": np.array([1138, 725, 42, 1729, 203]),\n \"counts\": np.array([2, 2, 2, 1, 1]),\n }\n expected_extract_output = {\n \"vocab\": np.array([1138, 725, 42, 1729, 203]),\n \"idf_weights\": None,\n }\n expected_accumulator = combiner._create_accumulator()\n expected_accumulator = self.update_accumulator(expected_accumulator,\n expected_accumulator_output)\n self.validate_accumulator_serialize_and_deserialize(combiner, data,\n expected_accumulator)\n self.validate_accumulator_uniqueness(combiner, data)\n self.validate_accumulator_extract(combiner, data, expected_extract_output)\n\n # TODO(askerryryan): Add tests confirming equivalence to behavior of\n # existing tf.keras.preprocessing.text.Tokenizer.\n @parameterized.named_parameters(\n {\n \"testcase_name\": \"top_k_smaller_than_full_vocab\",\n \"data\": np.array([[42, 1138], [1729, 1138], [725], [1729, 1138]]),\n \"vocab_size\": 3,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([1138, 1729, 725, 42]),\n \"counts\": np.array([3, 2, 1, 1]),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([1138, 1729, 725]),\n \"idf_weights\": None,\n },\n },\n {\n \"testcase_name\": \"top_k_larger_than_full_vocab\",\n \"data\": np.array([[42, 1138], [1729, 1138], [725], [1729, 1138]]),\n \"vocab_size\": 10,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([1138, 1729, 725, 42]),\n \"counts\": np.array([3, 2, 1, 1]),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([1138, 1729, 725, 42]),\n \"idf_weights\": None,\n },\n },\n {\n \"testcase_name\": \"no_top_k\",\n \"data\": np.array([[42, 
1138], [1729, 1138], [725], [1729, 1138]]),\n \"vocab_size\": None,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([1138, 1729, 725, 42]),\n \"counts\": np.array([3, 2, 1, 1]),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([1138, 1729, 725, 42]),\n \"idf_weights\": None,\n },\n },\n {\n \"testcase_name\": \"single_element_per_row\",\n \"data\": np.array([[42], [1138], [1729], [1138], [725]]),\n \"vocab_size\": 3,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([1138, 1729, 725, 42]),\n \"counts\": np.array([2, 1, 1, 1]),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([1138, 1729, 725]),\n \"idf_weights\": None,\n },\n },\n # Which tokens are retained are based on global frequency, and thus are\n # sensitive to frequency within a document. In contrast, because idf only\n # considers the presence of a token in a document, it is insensitive\n # to the frequency of the token within the document.\n {\n \"testcase_name\":\n \"retained_tokens_sensitive_to_within_document_frequency\",\n \"data\":\n np.array([[42, 42], [1138, 1138], [1729, 1729], [1138, 1138],\n [725, 203]]),\n \"vocab_size\":\n 3,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([1138, 42, 1729, 725, 203]),\n \"counts\": np.array([4, 2, 2, 1, 1]),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([1138, 1729, 42]),\n \"idf_weights\": None,\n },\n })\n def test_combiner_computation(self, data, vocab_size,\n expected_accumulator_output,\n expected_extract_output):\n combiner = index_lookup._IndexLookupCombiner(vocab_size=vocab_size)\n expected_accumulator = combiner._create_accumulator()\n expected_accumulator = self.update_accumulator(expected_accumulator,\n expected_accumulator_output)\n self.validate_accumulator_computation(combiner, data, expected_accumulator)\n self.validate_accumulator_extract(combiner, data, expected_extract_output)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n" ]
[ [ "tensorflow.compat.v2.__internal__.tf2.enabled", "tensorflow.compat.v2.data.Dataset.from_tensor_slices", "tensorflow.compat.v2.ragged.constant", "tensorflow.compat.v2.io.gfile.GFile", "tensorflow.compat.v2.test.main", "tensorflow.compat.v2.data.Dataset.from_tensors", "tensorflow.compat.v2.lookup.TextFileInitializer", "tensorflow.compat.v2.TensorShape", "numpy.array", "tensorflow.compat.v2.executing_eagerly", "tensorflow.compat.v2.SparseTensor" ] ]
Qub3k/subjective-exp-consistency-check
[ "ad159e9ed161e7f04016cc053d90b8e20f6963ed" ]
[ "qnormal.py" ]
[ "# Authors: Krzysztof Rusek <[email protected]>\n# Jakub Nawała <[email protected]>\n\nimport numpy as np\nimport probability_grid_estimation as pge\n\n\ndef prob(psi, sigma, cdf=False):\n \"\"\"\n\n :param psi: QNormal parameter, vector\n :param sigma: QNormal parameter, vector\n :param cdf: If true return pdf\n :return: probabilities\n \"\"\"\n grid = pge.get_each_answer_probability_for_qnormal([psi], [sigma])\n probs = grid.to_numpy(dtype=np.float64)[0]\n if cdf:\n probs = np.cumsum(probs, axis=-1)\n return probs\n\n\ndef sample(psi, sigma, experiments, n):\n \"\"\"\n\n :param psi: GSD parameter\n :param sigma: GSD parameter\n :param experiments: Number of testers\n :param n: number of samples\n :return: random sample from the QNormal distribution\n \"\"\"\n\n probs = prob(psi, sigma)\n s = np.random.multinomial(experiments, probs, size=(n))\n return s\n" ]
[ [ "numpy.cumsum", "numpy.random.multinomial" ] ]
mukaiu/PaddleNLP
[ "0315365dbafa6e3b1c7147121ba85e05884125a5" ]
[ "paddlenlp/utils/tools.py" ]
[ "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport paddle\nfrom .log import logger\n\n\ndef static_params_to_dygraph(model, static_tensor_dict):\n \"\"\"Simple tool for convert static paramters to dygraph paramters dict.\n\n **NOTE** The model must both support static graph and dygraph mode.\n\n Args:\n model (nn.Layer): the model of a neural network.\n static_tensor_dict (string): path of which locate the saved paramters in static mode.\n Usualy load by `paddle.static.load_program_state`.\n\n Returns:\n [tensor dict]: a state dict the same as the dygraph mode.\n \"\"\"\n state_dict = model.state_dict()\n # static_tensor_dict = paddle.static.load_program_state(static_params_path)\n\n ret_dict = dict()\n for n, p in state_dict.items():\n if p.name not in static_tensor_dict:\n logger.info(\"%s paramter is missing from you state dict.\" % n)\n continue\n ret_dict[n] = static_tensor_dict[p.name]\n\n return ret_dict\n\n\ndef dygraph_params_to_static(model, dygraph_tensor_dict, topo=None):\n \"\"\"Simple tool for convert dygraph paramters to static paramters dict.\n\n **NOTE** The model must both support static graph and dygraph mode.\n\n Args:\n model (nn.Layer): the model of a neural network.\n dygraph_tensor_dict (string): path of which locate the saved paramters in static mode.\n\n Returns:\n [tensor dict]: a state dict the same as the dygraph mode.\n \"\"\"\n state_dict = model.state_dict()\n\n ret_dict = dict()\n for name, parm in state_dict.items():\n if name not in dygraph_tensor_dict:\n logger.info(\"%s paramter is missing from you state dict.\" % name)\n continue\n\n tensor = dygraph_tensor_dict[name]\n if parm.is_distributed:\n assert topo is not None\n for dim, v in enumerate(tensor.shape):\n if parm.shape[dim] != v:\n break\n\n splited = np.split(tensor, topo.mp_info.size,\n axis=dim)[topo.mp_info.rank]\n ret_dict[parm.name] = splited\n else:\n ret_dict[parm.name] = tensor\n\n return ret_dict\n\n\nclass TimeCostAverage(object):\n \"\"\"\n Simple tool for calcluating time average cost in the process of training and inferencing.\n \"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n \"\"\"\n Reset the recoder state, and reset the `cnt` to zero.\n \"\"\"\n self.cnt = 0\n self.total_time = 0\n\n def record(self, usetime):\n \"\"\"\n Recoding the time cost in current step and accumulating the `cnt`.\n \"\"\"\n self.cnt += 1\n self.total_time += usetime\n\n def get_average(self):\n \"\"\"\n Returning the average time cost after the start of training.\n \"\"\"\n if self.cnt == 0:\n return 0\n return self.total_time / self.cnt\n\n\ndef get_env_device():\n \"\"\"\n Return the device name of running enviroment.\n \"\"\"\n if paddle.is_compiled_with_cuda():\n return 'gpu'\n elif paddle.is_compiled_with_npu():\n return 'npu'\n elif paddle.is_compiled_with_rocm():\n return 'rocm'\n elif paddle.is_compiled_with_xpu():\n return 'xpu'\n return 'cpu'\n\n\ndef compare_version(version, 
pair_version):\n    \"\"\"\n    Args:\n        version (str): The first version string to be compared.\n            The format of the version string should be as follows: \"xxx.yyy.zzz\".\n        pair_version (str): The second version string to be compared.\n            The format of the version string should be as follows: \"xxx.yyy.zzz\".\n    Returns:\n        int: The result of the comparison. 1 means version > pair_version; 0 means\n            version = pair_version; -1 means version < pair_version.\n    \n    Examples:\n        >>> compare_version(\"2.2.1\", \"2.2.0\")\n        >>> 1\n        >>> compare_version(\"2.2.0\", \"2.2.0\")\n        >>> 0\n        >>> compare_version(\"2.2.0-rc0\", \"2.2.0\")\n        >>> -1\n        >>> compare_version(\"2.3.0-rc0\", \"2.2.0\")\n        >>> 1\n    \"\"\"\n    version = version.strip()\n    pair_version = pair_version.strip()\n    if version == pair_version:\n        return 0\n    version_list = version.split(\".\")\n    pair_version_list = pair_version.split(\".\")\n    for version_code, pair_version_code in zip(version_list, pair_version_list):\n        if not version_code.isnumeric():\n            return -1\n        if not pair_version_code.isnumeric():\n            return 1\n        if int(version_code) > int(pair_version_code):\n            return 1\n        elif int(version_code) < int(pair_version_code):\n            return -1\n    return 0\n\n\ndef get_bool_ids_greater_than(probs, limit=0.5, return_prob=False):\n    \"\"\"\n    Get indices along the last dimension of probability arrays whose values are greater than a threshold.\n\n    Args:\n        probs (List[List[float]]): The input probability arrays.\n        limit (float): The probability threshold.\n        return_prob (bool): Whether to return the probability as well.\n    Returns:\n        List[List[int]]: The indices along the last dimension that meet the condition.\n    \"\"\"\n    probs = np.array(probs)\n    dim_len = len(probs.shape)\n    if dim_len > 1:\n        result = []\n        for p in probs:\n            result.append(get_bool_ids_greater_than(p, limit, return_prob))\n        return result\n    else:\n        result = []\n        for i, p in enumerate(probs):\n            if p > limit:\n                if return_prob:\n                    result.append((i, p))\n                else:\n                    result.append(i)\n        return result\n\n\ndef get_span(start_ids, end_ids, with_prob=False):\n    \"\"\"\n    Get span set from position start and end lists.\n\n    Args:\n        start_ids (List[int]/List[tuple]): The start index list.\n        end_ids (List[int]/List[tuple]): The end index list.\n        with_prob (bool): If True, each element of start_ids and end_ids is a tuple like (index, probability).\n    Returns:\n        set: The span set without overlapping; every id can only be used once.\n    \"\"\"\n    if with_prob:\n        start_ids = sorted(start_ids, key=lambda x: x[0])\n        end_ids = sorted(end_ids, key=lambda x: x[0])\n    else:\n        start_ids = sorted(start_ids)\n        end_ids = sorted(end_ids)\n\n    start_pointer = 0\n    end_pointer = 0\n    len_start = len(start_ids)\n    len_end = len(end_ids)\n    couple_dict = {}\n    while start_pointer < len_start and end_pointer < len_end:\n        if with_prob:\n            start_id = start_ids[start_pointer][0]\n            end_id = end_ids[end_pointer][0]\n        else:\n            start_id = start_ids[start_pointer]\n            end_id = end_ids[end_pointer]\n\n        if start_id == end_id:\n            couple_dict[end_ids[end_pointer]] = start_ids[start_pointer]\n            start_pointer += 1\n            end_pointer += 1\n            continue\n        if start_id < end_id:\n            couple_dict[end_ids[end_pointer]] = start_ids[start_pointer]\n            start_pointer += 1\n            continue\n        if start_id > end_id:\n            end_pointer += 1\n            continue\n    result = [(couple_dict[end], end) for end in couple_dict]\n    result = set(result)\n    return result\n" ]
[ [ "numpy.array", "numpy.split" ] ]
yanb514/I24-trajectory-generation
[ "3b1e25f94f42f1e761a13ab57c48d362b1eb7bc0" ]
[ "homography.py" ]
[ "# Attention interviewers!!! - this code is indicative of how I like to write. Not better, not worse.\n# Judge me based off of this\n# Thanks, Derek Gloudemans 2021\n\nimport torch\nimport numpy as np\nimport cv2\nimport sys, os\nimport csv\n\ndef line_to_point(line,point):\n \"\"\"\n Given a line defined by two points, finds the distance from that line to the third point\n line - (x0,y0,x1,y1) as floats\n point - (x,y) as floats\n Returns\n -------\n distance - float >= 0\n \"\"\"\n \n numerator = np.abs((line[2]-line[0])*(line[1]-point[1]) - (line[3]-line[1])*(line[0]-point[0]))\n denominator = np.sqrt((line[2]-line[0])**2 +(line[3]-line[1])**2)\n \n return numerator / (denominator + 1e-08)\n\ndef find_vanishing_point(lines):\n \"\"\"\n Finds best (L2 norm) vanishing point given a list of lines\n\n Parameters\n ----------\n lines : [(x0,y0,x1,y1), ...]\n\n Returns\n -------\n vp - (x,y)\n \"\"\"\n \n # mx+b form\n #y0 = ax + c\n #y1 = bx + d\n \n line0 = lines[0]\n line1 = lines[1]\n a = (line0[3] - line0[1])/line0[2] - line0[0]\n b = (line1[3] - line1[1])/line1[2] - line1[0]\n c = line0[1] - a*line0[0]\n d = line1[1] - c*line1[0]\n \n # intersection\n px = (d-c)/(a-b)\n py = a*(d-c)/(a-b) + c\n best_dist = np.inf\n \n # using intersection as starting point, grid out a grid of 11 x 11 points with spacing g\n g = 1e+16\n n_pts = 31\n \n while g > 1:\n #print(\"Gridding at g = {}\".format(g))\n\n # create grid centered around px,py with spacing g\n \n x_pts = np.arange(px-g*(n_pts//2),px+g*(n_pts//2),g)\n y_pts = np.arange(py-g*(n_pts//2),py+g*(n_pts//2),g)\n \n for x in x_pts:\n for y in y_pts:\n # for each point in grid, compute average distance to vanishing point\n dist = 0\n for line in lines:\n dist += line_to_point(line,(x,y))**2\n \n # keep best point in grid\n if dist < best_dist:\n px = x \n py = y\n best_dist = dist\n #print(\"Best vp so far: ({},{}), with average distance {}\".format(px,py,np.sqrt(dist/len(lines))))\n \n # regrid\n g = g / 10.0\n \n return [px,py]\n\nclass Homography():\n \"\"\"\n Homographer provides utiliites for converting between image,space, and state coordinates\n One homographer object corresponds to a single space/state formulation but\n can have multiple camera/image correspondences\n \"\"\"\n\n def __init__(self,f1 = None,f2 = None):\n \"\"\"\n Initializes Homgrapher object. 
\n \n f1 - arbitrary function that converts a [d,m,3] matrix of points in space \n to a [d,m,s] matrix in state formulation\n f2 - arbitrary function that converts [d,m,s] matrix into [d,m,3] matrix in space\n \n where d is the number of objects\n m is the number of points per object\n s is the state size\n\n returns - nothing\n\n \"\"\"\n \n if f1 is not None:\n self.f1 = f1\n self.f2 = f2\n \n else:\n self.f1 = self.i24_space_to_state\n self.f2 = self.i24_state_to_space\n \n # each correspondence is: name: {H,H_inv,P,corr_pts,space_pts,vps} \n # where H and H inv are 3x34 planar homography matrices and P is a 3x4 projection matrix\n self.correspondence = {}\n \n self.class_heights = {\n \"sedan\":4,\n \"midsize\":5,\n \"van\":6,\n \"pickup\":5,\n \"semi\":12,\n \"truck (other)\":12,\n \"truck\": 12,\n \"motorcycle\":4,\n \"trailer\":3,\n \"other\":5\n }\n \n \n self.class_dims = {\n \"sedan\":[16,6,4],\n \"midsize\":[18,6.5,5],\n \"van\":[20,6,6.5],\n \"pickup\":[20,6,5],\n \"semi\":[55,9,12],\n \"truck (other)\":[25,9,12],\n \"truck\": [25,9,12],\n \"motorcycle\":[7,3,4],\n \"trailer\":[16,7,3],\n \"other\":[18,6.5,5]\n }\n \n self.class_dict = { \"sedan\":0,\n \"midsize\":1,\n \"van\":2,\n \"pickup\":3,\n \"semi\":4,\n \"truck (other)\":5,\n \"truck\": 5,\n \"motorcycle\":6,\n \"trailer\":7,\n 0:\"sedan\",\n 1:\"midsize\",\n 2:\"van\",\n 3:\"pickup\",\n 4:\"semi\",\n 5:\"truck (other)\",\n 6:\"motorcycle\",\n 7:\"trailer\"\n }\n \n self.default_correspondence = None\n \n def add_i24_camera(self,point_path,vp_path,camera_name):\n # load points\n corr_pts= []\n space_pts = []\n with open(point_path,\"r\") as f:\n lines = f.readlines()\n \n for line in lines[1:-4]:\n line = line.rstrip(\"\\n\").split(\",\")\n corr_pts.append ([float(line[0]),float(line[1])])\n space_pts.append([int(line[2]),int(line[3])])\n \n # load vps\n lines1 = []\n lines2 = []\n lines3 = []\n with open(vp_path,\"r\") as f:\n read = csv.reader(f)\n for item in read:\n if item[4] == '0':\n lines1.append(np.array(item).astype(float))\n elif item[4] == '1':\n lines2.append(np.array(item).astype(float))\n elif item[4] == '2':\n lines3.append(np.array(item).astype(float))\n \n # get all axis labels for a particular axis orientation\n vp1 = find_vanishing_point(lines1)\n vp2 = find_vanishing_point(lines2)\n vp3 = find_vanishing_point(lines3)\n vps = [vp1,vp2,vp3]\n \n self.add_correspondence(corr_pts,space_pts,vps,name = camera_name)\n \n \n def i24_space_to_state(self,points):\n \"\"\"\n points - [d,8,3] array of x,y,z points for fbr,fbl,bbr,bbl,ftr,ftl,fbr,fbl\n \n returns - [d,6] array of points in state formulation\n \"\"\"\n d = points.shape[0]\n new_pts = torch.zeros([d,6])\n \n # rear center bottom of vehicle is (x,y)\n \n # x is computed as average of two bottom rear points\n new_pts[:,0] = (points[:,2,0] + points[:,3,0]) / 2.0\n \n # y is computed as average 4 bottom point y values\n new_pts[:,1] = (points[:,0,1] + points[:,1,1] +points[:,2,1] + points[:,3,1]) / 4.0\n \n # l is computed as avg length between bottom front and bottom rear\n new_pts[:,2] = torch.abs ( ((points[:,0,0] + points[:,1,0]) - (points[:,2,0] + points[:,3,0]))/2.0 )\n \n # w is computed as avg length between botom left and bottom right\n new_pts[:,3] = torch.abs( ((points[:,0,1] + points[:,2,1]) - (points[:,1,1] + points[:,3,1]))/2.0)\n\n # h is computed as avg length between all top and all bottom points\n new_pts[:,4] = torch.mean(torch.abs( (points[:,0:4,2] - points[:,4:8,2])),dim = 1)\n \n # direction is +1 if vehicle is traveling along 
direction of increasing x, otherwise -1\n        new_pts[:,5] = torch.sign( ((points[:,0,0] + points[:,1,0]) - (points[:,2,0] + points[:,3,0]))/2.0 ) \n        \n        return new_pts\n    \n    def i24_state_to_space(self,points):\n        d = points.shape[0]\n        new_pts = torch.zeros([d,8,3])\n        \n        # assign x values\n        new_pts[:,[0,1,4,5],0] = (points[:,0] + points[:,5]*points[:,2]).unsqueeze(1).repeat(1,4)\n        new_pts[:,[2,3,6,7],0] = (points[:,0]).unsqueeze(1).repeat(1,4)\n        \n        # assign y values\n        new_pts[:,[0,2,4,6],1] = (points[:,1] - points[:,5]*points[:,3]/2.0).unsqueeze(1).repeat(1,4)\n        new_pts[:,[1,3,5,7],1] = (points[:,1] + points[:,5]*points[:,3]/2.0).unsqueeze(1).repeat(1,4)\n        \n        # assign z values\n        new_pts[:,4:8,2] = -(points[:,4]).unsqueeze(1).repeat(1,4) \n        \n        return new_pts\n    \n    \n    def space_to_state(self,points):\n        \"\"\"\n        points - [d,m,3] matrix of points in 3-space\n        \"\"\"\n        return self.f1(points)\n    \n    def state_to_space(self,points):\n        \"\"\"\n        points - [d,m,s] matrix of points in state formulation\n        \"\"\"\n        return self.f2(points)\n    \n\n    def add_correspondence(self,corr_pts,space_pts,vps,name = None):\n        \"\"\"\n        corr_pts - [n,2] array-like of image points\n        space_pts - [n,2] array-like of corresponding space points\n        vps - list of 3 vanishing points [(x,y),(x,y),(x,y)]\n        name - str, preferably camera name e.g. p1c4\n        \"\"\"\n        \n        if name is None:\n            name = self.default_correspondence\n        \n        corr_pts = np.stack(corr_pts)\n        space_pts = np.stack(space_pts)\n        cor = {}\n        cor[\"vps\"] = vps\n        cor[\"corr_pts\"] = corr_pts\n        cor[\"space_pts\"] = space_pts\n        \n        cor[\"H\"],_ = cv2.findHomography(corr_pts,space_pts)\n        cor[\"H_inv\"],_ = cv2.findHomography(space_pts,corr_pts)\n        \n        \n        # P is a [3,4] matrix \n        #  column 0 - vanishing point for space x-axis (axis 0) in image coordinates (im_x,im_y,im_scale_factor)\n        #  column 1 - vanishing point for space y-axis (axis 1) in image coordinates (im_x,im_y,im_scale_factor)\n        #  column 2 - vanishing point for space z-axis (axis 2) in image coordinates (im_x,im_y,im_scale_factor)\n        #  column 3 - space origin in image coordinates (im_x,im_y,scale_factor)\n        #  columns 0,1 and 3 are identical to the columns of H, \n        #  We simply insert the z-axis column (im_x,im_y,1) as the new column 2\n        \n        P = np.zeros([3,4])\n        P[:,0] = cor[\"H_inv\"][:,0]\n        P[:,1] = cor[\"H_inv\"][:,1]\n        P[:,3] = cor[\"H_inv\"][:,2]\n        P[:,2] = np.array([vps[2][0],vps[2][1],1]) * 0.01\n        cor[\"P\"] = P\n        \n        self.correspondence[name] = cor\n        \n        if self.default_correspondence is None:\n            self.default_correspondence = name\n        \n    \n    \n    def remove_correspondence(self,name): \n        try:\n            del self.correspondence[name]\n            print(\"Deleted correspondence for {}\".format(name))\n        except KeyError:\n            print(\"Tried to delete correspondence {}, but this does not exist\".format(name))\n    \n    \n    # TODO - finish implementation!\n    def im_to_space(self,points, name = None,heights = None):\n        \"\"\"\n        Converts points by means of ____________\n        \n        points - [d,m,2] array of points in image\n        \"\"\"\n        if name is None:\n            name = self.default_correspondence\n        \n        \n        d = points.shape[0]\n        \n        # convert points into size [dm,3]\n        points = points.reshape(-1,2).double()\n        points = torch.cat((points,torch.ones([points.shape[0],1]).double()),1)  # add 3rd row\n        \n        if heights is not None:\n            H = torch.from_numpy(self.correspondence[name][\"H\"]).transpose(0,1)\n            new_pts = torch.matmul(points,H)\n            \n            # divide each point 0th and 1st column by the 2nd column\n            new_pts[:,0] = new_pts[:,0] / new_pts[:,2]\n            new_pts[:,1] = new_pts[:,1] / new_pts[:,2]\n            \n            # drop scale factor column\n            new_pts = new_pts[:,:2] \n            \n            # reshape to [d,m,2]\n            new_pts = new_pts.reshape(d,-1,2)\n            \n            # add third column for 
height\n new_pts = torch.cat((new_pts,torch.zeros([d,new_pts.shape[1],1]).double()),2)\n \n new_pts[:,[4,5,6,7],2] = heights.unsqueeze(1).repeat(1,4).double()\n \n else:\n print(\"No heights were input\")\n return\n \n return new_pts\n \n \n def space_to_im(self,points,name = None):\n \"\"\"\n Projects 3D space points into image/correspondence using P:\n new_pts = P x points T ---> [dm,3] T = [3,4] x [4,dm]\n performed by flattening batch dimension d and object point dimension m together\n \n points - [d,m,3] array of points in 3-space\n \"\"\"\n if name is None:\n name = self.default_correspondence\n \n d = points.shape[0]\n \n # convert points into size [dm,4]\n points = points.reshape(-1,3)\n points = torch.cat((points.double(),torch.ones([points.shape[0],1]).double()),1) # add 4th row\n \n # [dm,3]\n points = torch.transpose(points,0,1).double()\n \n # project into [dm,3]\n P = torch.from_numpy(self.correspondence[name][\"P\"]).double()\n new_pts= torch.matmul(P,points).transpose(0,1)\n \n # divide each point 0th and 1st column by the 2nd column\n new_pts[:,0] = new_pts[:,0] / new_pts[:,2]\n new_pts[:,1] = new_pts[:,1] / new_pts[:,2]\n \n # drop scale factor column\n new_pts = new_pts[:,:2] \n \n # reshape to [d,m,2]\n new_pts = new_pts.reshape(d,-1,2)\n return new_pts\n \n \n def state_to_im(self,points,name = None):\n \"\"\"\n Calls state_to_space, then space_to_im\n \n points - [d,m,s] matrix of points in state formulation\n \"\"\"\n if name is None:\n name = self.default_correspondence\n \n return self.space_to_im(self.state_to_space(points),name = name)\n \n \n def im_to_state(self,points,name = None, heights = None):\n \"\"\"\n Calls im_to_space, then space_to_state\n \n points - [d,m,2] array of points in image\n \"\"\"\n if name is None:\n name = self.default_correspondence\n \n return self.space_to_state(self.im_to_space(points,heights = heights,name = name))\n \n def guess_heights(self,classes):\n \"\"\"\n classes - [d] vector of string class names\n \n returns - [d] vector of float object height guesses\n \"\"\"\n \n heights = torch.zeros(len(classes))\n \n for i in range(len(classes)):\n try:\n heights[i] = self.class_heights[classes[i]]\n except KeyError:\n heights[i] = self.class_heights[\"other\"]\n \n return heights\n \n def height_from_template(self,template_boxes,template_space_heights,boxes):\n \"\"\"\n Predicts space height of boxes in image space. Given a space height and \n the corresponding image box (and thus image height), the relationship \n between heights in different coordinate systems should be roughly estimable. 
\n This strategy is used to guess the heights of the second set of boxes in\n image space according to : \n template_im_heights:template_space_heights = new_im_heights:new_box heights\n \n template_boxes - [d,m,2,] array of points corresponding to d object boxes \n (typical usage would be to use boxes from previous frame\n or apriori box predictions for current frame))\n template_space_heights - [d] array of corresponding object heights in space\n boxes - [d,m,2] array of points in image\n \n returns\n \n height - [d] array of object heights in space\n \"\"\"\n \n # get rough heights of objects in image\n template_top = torch.mean(template_boxes[:,4:8,:],dim = 1)\n template_bottom = torch.mean(template_boxes[:,0:4,:],dim = 1)\n template_im_height = torch.sum(torch.sqrt(torch.pow((template_top - template_bottom),2)),dim = 1)\n template_ratio = template_im_height / template_space_heights\n \n box_top = torch.mean(boxes[:,4:8,:],dim = 1)\n box_bottom = torch.mean(boxes[:,0:4,:],dim = 1)\n box_height = torch.sum(torch.sqrt(torch.pow((box_top - box_bottom),2)),dim = 1)\n\n\n height = box_height / template_ratio\n return height\n \n \n def test_transformation(self,points,classes = None,name = None, im = None,heights = None, verbose = True):\n \"\"\"\n Transform image -> space -> state -> space -> image and \n outputs the average reprojection error in pixels for top and bottom box coords\n \n points - [d,8,2] array of pixel coordinates corresponding to object corners\n fbr,fbl,bbr,bbl,ftr,ftl,fbr,fbl\n name - str camera/correspondence name\n im- if a cv2-style image is given, will plot original and reprojected boxes \n heights - [d] array of object heights, otherwise heights will be guessed\n based on class\n \"\"\"\n if name is None:\n name = self.default_correspondence\n \n \n if heights is None:\n if classes is None:\n print(\"Must either specify heights or classes for boxes\")\n return\n else:\n guess_heights = self.guess_heights(classes)\n \n \n else:\n guess_heights = heights\n \n state_pts = self.im_to_state(points,heights = guess_heights,name = name)\n im_pts_repro = self.state_to_im(state_pts,name = name)\n \n # calc error\n error = torch.abs(points - im_pts_repro) \n bottom_error = torch.sqrt(torch.pow(error[:,:4,0],2) + torch.pow(error[:,:4,1],2)).mean()\n top_error = torch.sqrt(torch.pow(error[:,4:8,0],2) + torch.pow(error[:,4:8,1],2)).mean()\n \n if verbose:\n print(\"Average distance between reprojected points and original points:\")\n print(\"-----------------------------\")\n print(\"Top: {} pixels\".format(top_error))\n print(\"Bottom: {} pixels\".format(bottom_error))\n \n # if image, plot\n if im is not None:\n im = self.plot_boxes(im,points,color = (0,255,0))\n im = self.plot_boxes(im,im_pts_repro,color = (0,0,255))\n \n cv2.imshow(\"frame\",im)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n \n return top_error + bottom_error\n \n \n def scale_Z(self,boxes,heights,name = None, granularity = 1e-06, max_scale = 10):\n \"\"\"\n When a new correspondence is added, the 3rd column of P is off by a scale factor\n relative to the other columns. This function scales P optimally\n to minimize the reprojection errror of the given boxes with the given heights\n \n boxes - [d,8,2] array of image points corresponding to object bounding boxes\n d indexes objects\n heights - [d] array of object heights (in space coordinates e.g. 
feet)\n name - str - correspondence \n granularity - float - controls the minimum step size for grid search \n max_scale - float - roughly, a reasonable upper estimate for the space-unit change\n corresponding to one pixel in the Z direction\n \n returns - None (but alters P in self.correspondence)\n \"\"\"\n if name is None:\n name = self.default_correspondence\n \n P_orig = self.correspondence[name][\"P\"].copy()\n \n upper_bound = max_scale\n lower_bound = granularity\n \n # create a grid of 10 evenly spaced entries between upper and lower bound\n C_grid = np.linspace(lower_bound,upper_bound,num = 10)\n step_size = C_grid[1] - C_grid[0]\n iteration = 1\n \n while step_size > granularity:\n \n best_error = np.inf\n best_C = None\n # for each value of P, get average reprojection error\n for C in C_grid:\n \n # scale P\n P = P_orig.copy()\n P[:,2] *= C\n self.correspondence[name][\"P\"] = P\n \n # test error\n error = self.test_transformation(boxes,name = name, heights = heights,verbose = False)\n \n # if this is the best so far, store it\n if error < best_error:\n best_error = error\n best_C = C\n \n \n # define new upper, lower with width 2*step_size centered on best value\n #print(\"On loop {}: best C so far: {} avg error {}\".format(iteration,best_C,best_error))\n lower_bound = best_C - step_size\n upper_bound = best_C + step_size\n C_grid = np.linspace(lower_bound,upper_bound,num = 10)\n step_size = C_grid[1] - C_grid[0]\n\n #print(\"New C_grid: {}\".format(C_grid.round(4)))\n iteration += 1\n \n \n\n def plot_boxes(self,im,boxes,color = (255,255,255),labels = None,thickness = 1):\n \"\"\"\n As one might expect, plots 3D boxes on input image\n \n im - cv2 matrix-style image\n boxes - [d,8,2] array of image points where d indexes objects\n color - 3-tuple specifying box color to plot\n \"\"\"\n \n DRAW = [[0,1,1,0,1,0,0,0], #bfl\n [0,0,0,1,0,1,0,0], #bfr\n [0,0,0,1,0,0,1,1], #bbl\n [0,0,0,0,0,0,1,1], #bbr\n [0,0,0,0,0,1,1,0], #tfl\n [0,0,0,0,0,0,0,1], #tfr\n [0,0,0,0,0,0,0,1], #tbl\n [0,0,0,0,0,0,0,0]] #tbr\n \n DRAW_BASE = [[0,1,1,1], #bfl\n [0,0,1,1], #bfr\n [0,0,0,1], #bbl\n [0,0,0,0]] #bbr\n \n for idx, bbox_3d in enumerate(boxes):\n \n for a in range(len(bbox_3d)):\n ab = bbox_3d[a]\n for b in range(a,len(bbox_3d)):\n bb = bbox_3d[b]\n if DRAW[a][b] == 1:\n try:\n im = cv2.line(im,(int(ab[0]),int(ab[1])),(int(bb[0]),int(bb[1])),color,thickness)\n except:\n pass\n \n if labels is not None:\n label = labels[idx]\n left = bbox_3d[0,0]\n top = bbox_3d[0,1]\n im = cv2.putText(im,\"{}\".format(label),(int(left),int(top - 10)),cv2.FONT_HERSHEY_PLAIN,1,(0,0,0),3)\n im = cv2.putText(im,\"{}\".format(label),(int(left),int(top - 10)),cv2.FONT_HERSHEY_PLAIN,1,(255,255,255),1)\n \n return im\n \n\ndef load_i24_csv(file):\n \"\"\"\n Simple no-frills function to load data as currently formatted on the i24 project\n labels - first row of string headers for data columns\n data - dict of lists, one key per frame, one entry per frame object\n \"\"\"\n short_name = file.split(\"/\")[-1]\n HEADERS = True\n \n # parse first file\n rows = []\n with open(file,\"r\") as f:\n read = csv.reader(f)\n \n for row in read:\n rows.append(row)\n \n data = {}\n HEADERS = True\n for row_idx in range(len(rows)):\n row = rows[row_idx]\n \n # pass header lines through as-is\n if HEADERS:\n headers = row\n if len(row) > 0 and row[0] == \"Frame #\":\n HEADERS = False\n \n \n else:\n \n if len(row) == 0:\n continue\n \n frame_idx = int(row[0])\n if frame_idx not in data.keys():\n data[frame_idx] = [row]\n else:\n 
data[frame_idx].append(row)\n \n \n return headers,data\n\n# basic test code\nif __name__ == \"__main__\":\n \n camera_name = \"p2c3\"\n \n vp_path = \"/home/worklab/Documents/derek/i24-dataset-gen/DATA/vp/{}_axes.csv\".format(camera_name)\n point_path = \"/home/worklab/Documents/derek/i24-dataset-gen/DATA/tform/{}_im_lmcs_transform_points.csv\".format(camera_name)\n \n \n # get some data\n data_file = \"/home/worklab/Data/dataset_alpha/manual_correction/rectified_{}_0_track_outputs_3D.csv\".format(camera_name)\n labels,data = load_i24_csv(data_file)\n frame_data = data[0]\n # convert labels from first frame into tensor form\n boxes = []\n classes = []\n for item in frame_data:\n if len(item[11]) > 0:\n boxes.append(np.array(item[11:27]).astype(float))\n classes.append(item[3])\n boxes = torch.from_numpy(np.stack(boxes))\n boxes = torch.stack((boxes[:,::2],boxes[:,1::2]),dim = -1)\n \n # get first frame from sequence\n sequence = \"/home/worklab/Data/cv/video/ground_truth_video_06162021/segments/{}_0.mp4\".format(camera_name)\n cap = cv2.VideoCapture(sequence)\n _,frame = cap.read()\n \n \n # test homography\n hg = Homography()\n hg.add_i24_camera(point_path,vp_path,camera_name)\n \n # fit P and evaluate\n heights = hg.guess_heights(classes)\n hg.scale_Z(boxes,heights,name = camera_name)\n hg.test_transformation(boxes,classes,camera_name,frame)\n \n \n" ]
[ [ "torch.ones", "torch.stack", "numpy.array", "numpy.zeros", "torch.pow", "numpy.stack", "torch.zeros", "numpy.abs", "numpy.arange", "torch.sign", "torch.matmul", "torch.from_numpy", "torch.abs", "numpy.sqrt", "numpy.linspace", "torch.mean", "torch.transpose" ] ]
pengfeidip/SSD_pytorch
[ "f17dcfa76e359c288420df1690e9ce4365353f0a" ]
[ "ssd.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom layers import *\nfrom data import voc, coco\nimport os\n\n\nclass SSD(nn.Module):\n \"\"\"Single Shot Multibox Architecture\n The network is composed of a base VGG network followed by the\n added multibox conv layers. Each multibox layer branches into\n 1) conv2d for class conf scores\n 2) conv2d for localization predictions\n 3) associated priorbox layer to produce default bounding\n boxes specific to the layer's feature map size.\n See: https://arxiv.org/pdf/1512.02325.pdf for more details.\n\n Args:\n phase: (string) Can be \"test\" or \"train\"\n size: (int) input image size\n base: (VGG) VGG16 layers for input, size of either 300 or 500\n extras: (list) extra layers that feed to multibox loc and conf layers\n head: (list) \"multibox head\" consists of loc and conf conv layers\n num_class: (int )number of classinclude the background\n top_k: (int) when phase is test,select the top k default boxes to parse, default=20\n \"\"\"\n\n def __init__(self, phase, size, base, extras, head, num_classes, top_k=400, keep_top_k=200):\n super(SSD, self).__init__()\n self.phase = phase\n self.num_classes = num_classes\n self.cfg = (coco, voc)[num_classes == 21] # 城会玩,这种方式需要学习一下 --- pengfei ---\n self.priorbox = PriorBox(self.cfg)\n self.priors = self.priorbox.forward()\n self.size = size\n\n # SSD network\n self.vgg = nn.ModuleList(base)\n # Layer learns to scale the l2 normalized features from conv4_3\n self.L2Norm = L2Norm(512, 20)\n self.extras = nn.ModuleList(extras)\n\n self.loc = nn.ModuleList(head[0]) # --- pengfei --- conv layers for getting location\n self.conf = nn.ModuleList(head[1])\n\n\n if phase == 'test':\n self.top_k = top_k\n self.keep_top_k = keep_top_k\n self.softmax = nn.Softmax(dim=-1)\n self.detect = Detect(num_classes, bkg_label=0, top_k=self.top_k,\n keep_top_k=self.keep_top_k, conf_thresh=.01, nms_thresh=.45)\n\n def forward(self, x):\n \"\"\"Applies network layers and ops on input image(s) x.\n\n Args:\n x: input image or batch of images. Shape: [batch,3,300,300].\n\n Return:\n Depending on phase:\n test:\n Variable(tensor) of output class label predictions,\n confidence score, and corresponding location predictions for\n each object detected. 
Shape: [batch,topk,7]\n\n            train:\n                list of concat outputs from:\n                    1: confidence layers, Shape: [batch*num_priors,num_classes]\n                    2: localization layers, Shape: [batch,num_priors*4]\n                    3: priorbox layers, Shape: [2,num_priors*4]\n        \"\"\"\n        sources = list()\n        loc = list()\n        conf = list()\n\n        # apply vgg up to conv4_3 relu\n        # --- pengfei --- these are all the standard vgg16 layers (without BN)\n        for k in range(23):\n            x = self.vgg[k](x)\n\n        s = self.L2Norm(x)\n        sources.append(s)\n\n        # apply vgg up to fc7:\n        # --- pengfei --- because there are still pool5 + conv(512, 1024) + conv(1024, 1024) to apply\n        for k in range(23, len(self.vgg)):\n            x = self.vgg[k](x)\n        sources.append(x)\n\n        # apply extra layers and cache source layer outputs\n        # this resolves my earlier confusion about why the \"extra layers\" carry no activation function; at this point all the required feature_maps have been collected\n        for k, v in enumerate(self.extras):\n            x = F.relu(v(x), inplace=True)\n            if k % 2 == 1:\n                sources.append(x)\n\n        # apply multibox head to source layers\n        for (x, l, c) in zip(sources, self.loc, self.conf):\n            loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n            conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n        loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)\n        conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)\n        if self.phase == \"test\":\n            output = self.detect(\n                loc.view(loc.size(0), -1, 4),                   # loc preds\n                self.softmax(conf.view(conf.size(0), -1,\n                             self.num_classes)),                # conf preds\n                self.priors.type(type(x.data))                  # default boxes\n            )\n        else:\n            output = (\n                loc.view(loc.size(0), -1, 4),\n                conf.view(conf.size(0), -1, self.num_classes),\n                self.priors\n            )\n        return output\n\n    def load_weights(self, base_file):\n        other, ext = os.path.splitext(base_file)\n        if ext == '.pkl' or ext == '.pth':\n            print('Loading weights into state dict...')\n            self.load_state_dict(torch.load(base_file,\n                                 map_location=lambda storage, loc: storage))\n            print('Finished!')\n        else:\n            print('Sorry only .pth and .pkl files supported.')\n\n\n# This function is derived from torchvision VGG make_layers()\n# https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py\ndef vgg(cfg, i, batch_norm=False):\n    layers = []\n    in_channels = i\n    for v in cfg:\n        if v == 'M':\n            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n        elif v == 'C':\n            layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]\n        else:\n            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)\n            if batch_norm:\n                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n            else:\n                layers += [conv2d, nn.ReLU(inplace=True)]\n            in_channels = v\n    pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)\n    conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)\n    conv7 = nn.Conv2d(1024, 1024, kernel_size=1)\n    layers += [pool5, conv6,\n               nn.ReLU(inplace=True), conv7, nn.ReLU(inplace=True)]\n    return layers\n\n\n# [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256],\n# this implementation is quite interesting, haha,\n#todo but why only convolutions without an activation function --- pengfei --- because relu is only applied at line 87 above\ndef add_extras(cfg, i, batch_norm=False):\n    # Extra layers added to VGG for feature scaling\n    layers = []\n    in_channels = i\n    flag = False\n    for k, v in enumerate(cfg):\n        if in_channels != 'S':\n            if v == 'S':\n                layers += [nn.Conv2d(in_channels, cfg[k + 1],\n                           kernel_size=(1, 3)[flag], stride=2, padding=1)]\n            else:\n                layers += [nn.Conv2d(in_channels, v, kernel_size=(1, 3)[flag])]\n            flag = not flag\n        in_channels = v\n    return layers\n\n\ndef multibox(vgg, extra_layers, cfg, num_classes):\n    loc_layers = []\n    conf_layers = []\n    vgg_source = [21, -2] # vgg[21] means conv4_3 layers, vgg[-2] means the converted fc7 before relu activation\n    for k, v in enumerate(vgg_source):\n        loc_layers += 
[nn.Conv2d(vgg[v].out_channels,\n cfg[k] * 4, kernel_size=3, padding=1)]\n conf_layers += [nn.Conv2d(vgg[v].out_channels,\n cfg[k] * num_classes, kernel_size=3, padding=1)]\n for k, v in enumerate(extra_layers[1::2], 2):\n loc_layers += [nn.Conv2d(v.out_channels, cfg[k]\n * 4, kernel_size=3, padding=1)]\n conf_layers += [nn.Conv2d(v.out_channels, cfg[k]\n * num_classes, kernel_size=3, padding=1)]\n return vgg, extra_layers, (loc_layers, conf_layers)\n\nbase = {\n '300': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',\n 512, 512, 512],\n '512': [],\n}\nextras = {\n '300': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256],\n '512': [],\n}\nmbox = {\n '300': [4, 6, 6, 6, 4, 4], # number of boxes per feature map location\n '512': [],\n}\n\n\ndef build_ssd(phase, size=300, num_classes=21, top_k=400):\n \"\"\"\n :param phase: (str) \"train\" or \" test\"\n :param size: (int) im size of NN inputs, default{300}\n :param num_classes: (int) number of classes, include background default 21 for VOC\n :param top_k: (int) only when phase is \"test\", top_k is meaningful, it means how many\n boxes in intra-class NMS will be considered\n :return: (SSD) a SSD object\n \"\"\"\n if phase != \"test\" and phase != \"train\":\n print(\"ERROR: Phase: \" + phase + \" not recognized\")\n return\n if size != 300:\n print(\"ERROR: You specified size \" + repr(size) + \". However, \" +\n \"currently only SSD300 (size=300) is supported!\")\n return\n base_, extras_, head_ = multibox(vgg(base[str(size)], 3),\n add_extras(extras[str(size)], 1024),\n mbox[str(size)], num_classes)\n return SSD(phase, size, base_, extras_, head_, num_classes, top_k)\n" ]
[ [ "torch.nn.MaxPool2d", "torch.nn.BatchNorm2d", "torch.load", "torch.nn.Softmax", "torch.nn.Conv2d", "torch.nn.ModuleList", "torch.nn.ReLU" ] ]
sum-coderepo/HadoopApp
[ "0e8d48c5d541b5935c9054fb1335d829d67d7b59" ]
[ "NeuralNetwork/ClassificationNeuralNetwok.py" ]
[ "import numpy as np\nfrom sklearn import datasets, linear_model\nimport matplotlib.pyplot as plt\n\n\nclass Config:\n nn_input_dim = 2 # input layer dimensionality\n nn_output_dim = 2 # output layer dimensionality\n # Gradient descent parameters (I picked these by hand)\n epsilon = 0.01 # learning rate for gradient descent\n reg_lambda = 0.01 # regularization strength\n\n\ndef generate_data():\n np.random.seed(0)\n X, y = datasets.make_moons(200, noise=0.20)\n return X, y\n\n\ndef visualize(X, y, model):\n # plt.scatter(X[:, 0], X[:, 1], s=40, c=y, cmap=plt.cm.Spectral)\n # plt.show()\n plot_decision_boundary(lambda x:predict(model,x), X, y)\n plt.title(\"Logistic Regression\")\n plt.show()\n\n\ndef plot_decision_boundary(pred_func, X, y):\n # Set min and max values and give it some padding\n x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5\n y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5\n h = 0.01\n # Generate a grid of points with distance h between them\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n # Predict the function value for the whole gid\n Z = pred_func(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n # Plot the contour and training examples\n plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)\n plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)\n plt.show()\n\n\n# Helper function to evaluate the total loss on the dataset\ndef calculate_loss(model, X, y):\n num_examples = len(X) # training set size\n W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']\n # Forward propagation to calculate our predictions\n z1 = X.dot(W1) + b1\n a1 = np.tanh(z1)\n z2 = a1.dot(W2) + b2\n exp_scores = np.exp(z2)\n probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)\n # Calculating the loss\n corect_logprobs = -np.log(probs[range(num_examples), y])\n data_loss = np.sum(corect_logprobs)\n # Add regulatization term to loss (optional)\n data_loss += Config.reg_lambda / 2 * (np.sum(np.square(W1)) + np.sum(np.square(W2)))\n return 1. / num_examples * data_loss\n\n\ndef predict(model, x):\n W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']\n # Forward propagation\n z1 = x.dot(W1) + b1\n a1 = np.tanh(z1)\n z2 = a1.dot(W2) + b2\n exp_scores = np.exp(z2)\n probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)\n return np.argmax(probs, axis=1)\n\n\n# This function learns parameters for the neural network and returns the model.\n# - nn_hdim: Number of nodes in the hidden layer\n# - num_passes: Number of passes through the training data for gradient descent\n# - print_loss: If True, print the loss every 1000 iterations\ndef build_model(X, y, nn_hdim, num_passes=20000, print_loss=False):\n # Initialize the parameters to random values. We need to learn these.\n num_examples = len(X)\n np.random.seed(0)\n W1 = np.random.randn(Config.nn_input_dim, nn_hdim) / np.sqrt(Config.nn_input_dim)\n b1 = np.zeros((1, nn_hdim))\n W2 = np.random.randn(nn_hdim, Config.nn_output_dim) / np.sqrt(nn_hdim)\n b2 = np.zeros((1, Config.nn_output_dim))\n\n # This is what we return at the end\n model = {}\n\n # Gradient descent. 
For each batch...\n for i in range(0, num_passes):\n\n # Forward propagation\n z1 = X.dot(W1) + b1\n a1 = np.tanh(z1)\n z2 = a1.dot(W2) + b2\n exp_scores = np.exp(z2)\n probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)\n\n # Backpropagation\n delta3 = probs\n delta3[range(num_examples), y] -= 1\n dW2 = (a1.T).dot(delta3)\n db2 = np.sum(delta3, axis=0, keepdims=True)\n delta2 = delta3.dot(W2.T) * (1 - np.power(a1, 2))\n dW1 = np.dot(X.T, delta2)\n db1 = np.sum(delta2, axis=0)\n\n # Add regularization terms (b1 and b2 don't have regularization terms)\n dW2 += Config.reg_lambda * W2\n dW1 += Config.reg_lambda * W1\n\n # Gradient descent parameter update\n W1 += -Config.epsilon * dW1\n b1 += -Config.epsilon * db1\n W2 += -Config.epsilon * dW2\n b2 += -Config.epsilon * db2\n\n # Assign new parameters to the model\n model = {'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2}\n\n # Optionally print the loss.\n # This is expensive because it uses the whole dataset, so we don't want to do it too often.\n if print_loss and i % 1000 == 0:\n print(\"Loss after iteration %i: %f\" % (i, calculate_loss(model, X, y)))\n\n return model\n\n\ndef classify(X, y):\n # clf = linear_model.LogisticRegressionCV()\n # clf.fit(X, y)\n # return clf\n\n pass\n\n\ndef main():\n X, y = generate_data()\n model = build_model(X, y, 3, print_loss=True)\n visualize(X, y, model)\n\n\nif __name__ == \"__main__\":\n main()" ]
[ [ "numpy.sum", "numpy.zeros", "numpy.dot", "sklearn.datasets.make_moons", "numpy.random.seed", "numpy.random.randn", "numpy.exp", "numpy.argmax", "matplotlib.pyplot.title", "numpy.arange", "matplotlib.pyplot.show", "numpy.power", "numpy.sqrt", "matplotlib.pyplot.contourf", "numpy.square", "numpy.tanh", "matplotlib.pyplot.scatter" ] ]
sunshineInmoon/ssd.pytorch
[ "a1cb37ea3e5fe64cdcf1c3d0004006baf1d046a1" ]
[ "train.py" ]
[ "from data import *\nfrom utils.augmentations import SSDAugmentation\nfrom layers.modules import MultiBoxLoss\nfrom ssd import build_ssd\nimport os\nimport sys\nimport time\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nimport torch.nn.init as init\nimport torch.utils.data as data\nimport numpy as np\nimport argparse\n\n\ndef str2bool(v):\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\n\nparser = argparse.ArgumentParser(\n description='Single Shot MultiBox Detector Training With Pytorch')\ntrain_set = parser.add_mutually_exclusive_group()\nparser.add_argument('--dataset', default='VOC', choices=['VOC', 'COCO'],\n type=str, help='VOC or COCO')\nparser.add_argument('--dataset_root', default=VOC_ROOT,\n help='Dataset root directory path')\nparser.add_argument('--basenet', default='vgg16_reducedfc.pth',\n help='Pretrained base model')\nparser.add_argument('--batch_size', default=32, type=int,\n help='Batch size for training')\nparser.add_argument('--resume', default=None, type=str,\n help='Checkpoint state_dict file to resume training from')\nparser.add_argument('--start_iter', default=0, type=int,\n help='Resume training at this iter')\nparser.add_argument('--num_workers', default=4, type=int,\n help='Number of workers used in dataloading')\nparser.add_argument('--cuda', default=True, type=str2bool,\n help='Use CUDA to train model')\nparser.add_argument('--lr', '--learning-rate', default=1e-3, type=float,\n help='initial learning rate')\nparser.add_argument('--momentum', default=0.9, type=float,\n help='Momentum value for optim')\nparser.add_argument('--weight_decay', default=5e-4, type=float,\n help='Weight decay for SGD')\nparser.add_argument('--gamma', default=0.1, type=float,\n help='Gamma update for SGD')\nparser.add_argument('--visdom', default=False, type=str2bool,\n help='Use visdom for loss visualization')\nparser.add_argument('--save_folder', default='weights/',\n help='Directory for saving checkpoint models')\nargs = parser.parse_args()\n\n\nif torch.cuda.is_available():\n if args.cuda:\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n if not args.cuda:\n print(\"WARNING: It looks like you have a CUDA device, but aren't \" +\n \"using CUDA.\\nRun with --cuda for optimal training speed.\")\n torch.set_default_tensor_type('torch.FloatTensor')\nelse:\n torch.set_default_tensor_type('torch.FloatTensor')\n\nif not os.path.exists(args.save_folder):\n os.mkdir(args.save_folder)\n\n\ndef train():\n if args.dataset == 'COCO':\n if args.dataset_root == VOC_ROOT:\n if not os.path.exists(COCO_ROOT):\n parser.error('Must specify dataset_root if specifying dataset')\n print(\"WARNING: Using default COCO dataset_root because \" +\n \"--dataset_root was not specified.\")\n args.dataset_root = COCO_ROOT\n cfg = coco\n dataset = COCODetection(root=args.dataset_root,\n transform=SSDAugmentation(cfg['min_dim'],\n MEANS))\n elif args.dataset == 'VOC':\n if args.dataset_root == COCO_ROOT:\n parser.error('Must specify dataset if specifying dataset_root')\n cfg = voc\n dataset = VOCDetection(root=args.dataset_root,\n transform=SSDAugmentation(cfg['min_dim'],\n MEANS)) #数据的读取和预处理\n\n if args.visdom:\n import visdom\n viz = visdom.Visdom()\n\n ssd_net = build_ssd('train', cfg['min_dim'], cfg['num_classes'])\n net = ssd_net\n\n if args.cuda:\n net = torch.nn.DataParallel(ssd_net)\n cudnn.benchmark = True\n\n if args.resume:\n print('Resuming training, loading {}...'.format(args.resume))\n 
ssd_net.load_weights(args.resume)\n    else:\n        vgg_weights = torch.load(args.save_folder + args.basenet)\n        print('Loading base network...')\n        ssd_net.vgg.load_state_dict(vgg_weights)\n\n    if args.cuda:\n        net = net.cuda()\n\n    if not args.resume:\n        print('Initializing weights...')\n        # initialize newly added layers' weights with xavier method\n        ssd_net.extras.apply(weights_init) # apply weights_init to every layer in the module; here it initializes the conv weights\n        ssd_net.loc.apply(weights_init)\n        ssd_net.conf.apply(weights_init)\n\n    optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum,\n                          weight_decay=args.weight_decay)\n    criterion = MultiBoxLoss(cfg['num_classes'], 0.5, True, 0, True, 3, 0.5,\n                             False, args.cuda)\n\n    net.train()\n    # loss counters\n    loc_loss = 0\n    conf_loss = 0\n    epoch = 0\n    print('Loading the dataset...')\n\n    epoch_size = len(dataset) // args.batch_size\n    print('Training SSD on:', dataset.name)\n    print('Using the specified args:')\n    print(args)\n\n    step_index = 0\n\n    if args.visdom:\n        vis_title = 'SSD.PyTorch on ' + dataset.name\n        vis_legend = ['Loc Loss', 'Conf Loss', 'Total Loss']\n        iter_plot = create_vis_plot('Iteration', 'Loss', vis_title, vis_legend)\n        epoch_plot = create_vis_plot('Epoch', 'Loss', vis_title, vis_legend)\n\n    data_loader = data.DataLoader(dataset, args.batch_size,\n                                  num_workers=args.num_workers,\n                                  shuffle=True, collate_fn=detection_collate,\n                                  pin_memory=True)\n    # create batch iterator\n    batch_iterator = iter(data_loader)\n    for iteration in range(args.start_iter, cfg['max_iter']):\n        if args.visdom and iteration != 0 and (iteration % epoch_size == 0):\n            update_vis_plot(epoch, loc_loss, conf_loss, epoch_plot, None,\n                            'append', epoch_size)\n            # reset epoch loss counters\n            loc_loss = 0\n            conf_loss = 0\n            epoch += 1\n\n        if iteration in cfg['lr_steps']:\n            step_index += 1\n            adjust_learning_rate(optimizer, args.gamma, step_index)\n\n        # load train data\n        images, targets = next(batch_iterator)\n\n        if args.cuda:\n            images = Variable(images.cuda())\n            targets = [Variable(ann.cuda(), volatile=True) for ann in targets]\n        else:\n            images = Variable(images)\n            targets = [Variable(ann, volatile=True) for ann in targets]\n        # forward\n        t0 = time.time()\n        out = net(images)\n        # backprop\n        optimizer.zero_grad()\n        loss_l, loss_c = criterion(out, targets)\n        loss = loss_l + loss_c\n        loss.backward()\n        optimizer.step()\n        t1 = time.time()\n        loc_loss += loss_l.data[0]\n        conf_loss += loss_c.data[0]\n\n        if iteration % 10 == 0:\n            print('timer: %.4f sec.' 
% (t1 - t0))\n            print('iter ' + repr(iteration) + ' || Loss: %.4f ||' % (loss.data[0]), end=' ')\n\n        if args.visdom:\n            update_vis_plot(iteration, loss_l.data[0], loss_c.data[0],\n                            iter_plot, epoch_plot, 'append')\n\n        if iteration != 0 and iteration % 5000 == 0:\n            print('Saving state, iter:', iteration)\n            torch.save(ssd_net.state_dict(), 'weights/ssd300_COCO_' +\n                       repr(iteration) + '.pth')\n    torch.save(ssd_net.state_dict(),\n               args.save_folder + '' + args.dataset + '.pth')\n\n\ndef adjust_learning_rate(optimizer, gamma, step):\n    \"\"\"Sets the learning rate to the initial LR decayed by 10 at every\n        specified step\n    # Adapted from PyTorch Imagenet example:\n    # https://github.com/pytorch/examples/blob/master/imagenet/main.py\n    \"\"\"\n    lr = args.lr * (gamma ** (step))\n    for param_group in optimizer.param_groups:\n        param_group['lr'] = lr\n\n\ndef xavier(param):\n    init.xavier_uniform(param)\n\n\ndef weights_init(m): # initialize conv parameters\n    if isinstance(m, nn.Conv2d):\n        xavier(m.weight.data)\n        m.bias.data.zero_()\n\n\ndef create_vis_plot(_xlabel, _ylabel, _title, _legend):\n    return viz.line(\n        X=torch.zeros((1,)).cpu(),\n        Y=torch.zeros((1, 3)).cpu(),\n        opts=dict(\n            xlabel=_xlabel,\n            ylabel=_ylabel,\n            title=_title,\n            legend=_legend\n        )\n    )\n\n\ndef update_vis_plot(iteration, loc, conf, window1, window2, update_type,\n                    epoch_size=1):\n    viz.line(\n        X=torch.ones((1, 3)).cpu() * iteration,\n        Y=torch.Tensor([loc, conf, loc + conf]).unsqueeze(0).cpu() / epoch_size,\n        win=window1,\n        update=update_type\n    )\n    # initialize epoch plot on first iteration\n    if iteration == 0:\n        viz.line(\n            X=torch.zeros((1, 3)).cpu(),\n            Y=torch.Tensor([loc, conf, loc + conf]).unsqueeze(0).cpu(),\n            win=window2,\n            update=True\n        )\n\n\nif __name__ == '__main__':\n    train()\n" ]
[ [ "torch.utils.data.DataLoader", "torch.ones", "torch.nn.init.xavier_uniform", "torch.load", "torch.autograd.Variable", "torch.set_default_tensor_type", "torch.cuda.is_available", "torch.zeros", "torch.nn.DataParallel", "torch.Tensor" ] ]
clairebub/interpretability
[ "8c71bbc976ce9382705a2395ad651da009ab4785" ]
[ "metrics/segmentation.py" ]
[ "# modified from https://github.com/learningtitans/isbi2017-part1/blob/master/metrics.py\n\n\nimport numpy as np\nfrom sklearn.metrics import jaccard_similarity_score\n\nsmooth_default = 1.\n\n\ndef dice_coef(y_true, y_pred, smooth=smooth_default):\n y_true_f = y_true.flatten()\n y_pred_f = y_pred.flatten()\n\n tp = np.sum(y_true_f * y_pred_f)\n tn = np.sum(y_true_f == y_pred_f) - tp\n\n return (2. * tp + smooth) / (len(y_true_f) - tn + tp + smooth)\n\n\ndef jacc_idx(y_true, y_pred, smooth=smooth_default):\n y_true_f = y_true.flatten()\n y_pred_f = y_pred.flatten()\n\n tp = np.sum(y_true_f * y_pred_f)\n tn = np.sum(y_true_f == y_pred_f) - tp\n\n return (tp + smooth) / (len(y_true_f) - tn + smooth)\n\n\ndef accuracy(y_true, y_pred, smooth=smooth_default):\n y_true_f = y_true.flatten()\n y_pred_f = y_pred.flatten()\n\n intersection = np.sum(y_true_f == y_pred_f)\n\n return (intersection + smooth) / (len(y_true_f) + smooth)\n\n\ndef jacc_loss(y_true, y_pred):\n return -jacc_idx(y_true, y_pred)\n\n\ndef dice_loss(y_true, y_pred):\n return -dice_coef(y_true, y_pred)\n\n\ndef dice_jacc_single(mask_true, mask_pred, smooth=smooth_default):\n bool_true = mask_true.reshape(-1).astype(np.bool)\n bool_pred = mask_pred.reshape(-1).astype(np.bool)\n if bool_true.shape != bool_pred.shape:\n raise ValueError(\"Masks of different sizes.\")\n\n bool_sum = bool_true.sum() + bool_pred.sum()\n if bool_sum == 0:\n print\n \"Empty mask\"\n return 0, 0\n intersec = np.logical_and(bool_true, bool_pred).sum()\n dice = 2. * intersec / bool_sum\n jacc = jaccard_similarity_score(bool_true.reshape((1, -1)), bool_pred.reshape((1, -1)), normalize=True, sample_weight=None)\n return dice, jacc\n\n\ndef dice_jacc_mean(mask_true, mask_pred, smooth=smooth_default):\n dice = 0\n jacc = 0\n for i in range(mask_true.shape[0]):\n current_dice, current_jacc = dice_jacc_single(mask_true=mask_true[i], mask_pred=mask_pred[i], smooth=smooth)\n dice = dice + current_dice\n jacc = jacc + current_jacc\n return dice / mask_true.shape[0], jacc / mask_true.shape[0]\n" ]
[ [ "numpy.sum", "numpy.logical_and" ] ]
mcvine/mcvine
[ "42232534b0c6af729628009bed165cd7d833789d" ]
[ "packages/mccomponents/tests/mccomponents/sample/geometry/intersection_TestCase.py" ]
[ "#!/usr/bin/env python\n#\n\nstandalone = True\n\nimport os\nos.environ['MCVINE_MPI_BINDING'] = 'NONE'\n\nimport mcni, shutil, numpy as np\nfrom mccomponents.sample import samplecomponent\n\n\nimport unittest\nclass TestCase(unittest.TestCase):\n\n\n def test1a(self):\n \"intersection: two blocks\"\n self._test('sampleassembly-variants/sampleassembly.xml.intersection_of_two_blocks', (0., 0., 0.), (.1, .15, .2))\n return\n\n def _test(self, xml, center, size):\n from utils import createSampleAssembly\n saxml = createSampleAssembly('.', './sampleassembly', xml)\n sample = samplecomponent( 'test', saxml)\n check(sample, center, size)\n import shutil\n shutil.rmtree(os.path.dirname(saxml))\n return\n \n pass # end of TestCase\n\n\ndef check(sample, center, size):\n x0,y0,z0 = center\n x,y,z = size\n \n neutron = mcni.neutron(r=(x0,y0,z0-1), v=(0,0,1), prob=1.)\n sample.scatter(neutron)\n assert np.isclose(neutron.state.position[2], z0+z/2.)\n\n neutron = mcni.neutron(r=(x0,y0,z0+1), v=(0,0,-1), prob=1.)\n sample.scatter(neutron)\n assert np.isclose(neutron.state.position[2], z0-z/2.)\n\n neutron = mcni.neutron(r=(x0,y0-1,z0), v=(0,1,0), prob=1.)\n sample.scatter(neutron)\n assert np.isclose(neutron.state.position[1], y0+y/2.)\n\n neutron = mcni.neutron(r=(x0,y0+1,z0), v=(0,-1,0), prob=1.)\n sample.scatter(neutron)\n assert np.isclose(neutron.state.position[1], y0-y/2.)\n\n neutron = mcni.neutron(r=(x0-1,y0,z0), v=(1,0,0), prob=1.)\n sample.scatter(neutron)\n assert np.isclose(neutron.state.position[0], x0+x/2.)\n\n neutron = mcni.neutron(r=(x0+1,y0,z0), v=(-1,0,0), prob=1.)\n sample.scatter(neutron)\n assert np.isclose(neutron.state.position[0], x0-x/2.)\n return\n\ndef main():\n unittest.main()\n return\n \n \nif __name__ == \"__main__\":\n main()\n \n# version\n__id__ = \"$Id$\"\n\n# End of file \n" ]
[ [ "numpy.isclose" ] ]
hnt4499/fairseq
[ "4b519e9876737db32047167e77bf5f8781edef99" ]
[ "fairseq/tasks/sentence_ranking.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom loguru import logger\nimport os\n\nimport numpy as np\nfrom fairseq import utils, utils_loguru\nfrom fairseq.data import (\n ConcatSentencesDataset,\n Dictionary,\n IdDataset,\n NestedDictionaryDataset,\n NumelDataset,\n NumSamplesDataset,\n PrependTokenDataset,\n RawLabelDataset,\n RightPadDataset,\n SortDataset,\n TruncateDataset,\n data_utils,\n)\nfrom fairseq.data.shorten_dataset import maybe_shorten_dataset\nfrom fairseq.tasks import LegacyFairseqTask, register_task\n\n\nlogger = logger.patch(utils_loguru.loguru_name_patcher)\n\n\n@register_task(\"sentence_ranking\")\nclass SentenceRankingTask(LegacyFairseqTask):\n \"\"\"\n Ranking task on multiple sentences.\n\n Args:\n dictionary (Dictionary): the dictionary for the input of the task\n \"\"\"\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add task-specific arguments to the parser.\"\"\"\n parser.add_argument(\"data\", metavar=\"FILE\", help=\"file prefix for data\")\n parser.add_argument(\n \"--num-classes\", type=int, help=\"number of sentences to be ranked\"\n )\n parser.add_argument(\n \"--init-token\",\n type=int,\n help=\"add token at the beginning of each batch item\",\n )\n parser.add_argument(\n \"--separator-token\", type=int, help=\"add separator token between inputs\"\n )\n parser.add_argument(\"--no-shuffle\", action=\"store_true\")\n parser.add_argument(\n \"--shorten-method\",\n default=\"none\",\n choices=[\"none\", \"truncate\", \"random_crop\"],\n help=\"if not none, shorten sequences that exceed --tokens-per-sample\",\n )\n parser.add_argument(\n \"--shorten-data-split-list\",\n default=\"\",\n help=\"comma-separated list of dataset splits to apply shortening to, \"\n 'e.g., \"train,valid\" (default: all dataset splits)',\n )\n parser.add_argument(\n \"--max-option-length\", type=int, help=\"max length for each option\"\n )\n\n def __init__(self, args, dictionary):\n super().__init__(args)\n self.dictionary = dictionary\n\n @classmethod\n def load_dictionary(cls, args, filename, source=True):\n \"\"\"Load the dictionary from the filename\n\n Args:\n filename (str): the filename\n \"\"\"\n dictionary = Dictionary.load(filename)\n dictionary.add_symbol(\"<mask>\")\n return dictionary\n\n @classmethod\n def setup_task(cls, args, **kwargs):\n assert (\n args.criterion == \"sentence_ranking\"\n ), \"Must set --criterion=sentence_ranking\"\n\n # load data dictionary\n data_dict = cls.load_dictionary(\n args,\n os.path.join(args.data, \"input0\", \"dict.txt\"),\n source=True,\n )\n logger.info(\"[input] dictionary: {} types\".format(len(data_dict)))\n return SentenceRankingTask(args, data_dict)\n\n def load_dataset(self, split, combine=False, **kwargs):\n \"\"\"Load a given dataset split (e.g., train, valid, test).\"\"\"\n\n def get_path(type, split):\n return os.path.join(self.args.data, type, split)\n\n def make_dataset(type, dictionary):\n split_path = get_path(type, split)\n\n dataset = data_utils.load_indexed_dataset(\n split_path,\n self.source_dictionary,\n self.args.dataset_impl,\n combine=combine,\n )\n return dataset\n\n input0 = make_dataset(\"input0\", self.source_dictionary)\n input_options = [\n make_dataset(\"input{idx}\".format(idx=idx + 1), self.source_dictionary)\n for idx in range(self.args.num_classes)\n ]\n\n if self.args.separator_token is not None:\n input0 = PrependTokenDataset(input0, 
self.args.separator_token)\n\n src_tokens = []\n for input_option in input_options:\n if self.args.init_token is not None:\n input_option = PrependTokenDataset(input_option, self.args.init_token)\n if self.args.max_option_length is not None:\n input_option = TruncateDataset(\n input_option, self.args.max_option_length\n )\n src_token = ConcatSentencesDataset(input_option, input0)\n src_token = maybe_shorten_dataset(\n src_token,\n split,\n self.args.shorten_data_split_list,\n self.args.shorten_method,\n self.args.max_positions,\n self.args.seed,\n )\n src_tokens.append(src_token)\n\n with data_utils.numpy_seed(self.args.seed):\n shuffle = np.random.permutation(len(src_tokens[0]))\n\n dataset = {\n \"id\": IdDataset(),\n \"nsentences\": NumSamplesDataset(),\n \"ntokens\": NumelDataset(src_tokens[0], reduce=True),\n }\n\n for src_token_idx in range(len(src_tokens)):\n dataset.update(\n {\n \"net_input{idx}\".format(idx=src_token_idx + 1): {\n \"src_tokens\": RightPadDataset(\n src_tokens[src_token_idx],\n pad_idx=self.source_dictionary.pad(),\n ),\n \"src_lengths\": NumelDataset(\n src_tokens[src_token_idx], reduce=False\n ),\n }\n }\n )\n\n label_path = \"{}.label\".format(get_path(\"label\", split))\n if os.path.exists(label_path):\n with open(label_path) as h:\n dataset.update(\n target=RawLabelDataset([int(x.strip()) for x in h.readlines()])\n )\n\n nested_dataset = NestedDictionaryDataset(\n dataset,\n sizes=[np.maximum.reduce([src_token.sizes for src_token in src_tokens])],\n )\n\n if self.args.no_shuffle:\n dataset = nested_dataset\n else:\n dataset = SortDataset(\n nested_dataset,\n # shuffle\n sort_order=[shuffle],\n )\n\n logger.info(\"Loaded {0} with #samples: {1}\".format(split, len(dataset)))\n\n self.datasets[split] = dataset\n return self.datasets[split]\n\n def build_model(self, args):\n from fairseq import models\n\n model = models.build_model(args, self)\n\n model.register_classification_head(\n getattr(args, \"ranking_head_name\", \"sentence_classification_head\"),\n num_classes=1,\n )\n\n return model\n\n def max_positions(self):\n return self.args.max_positions\n\n @property\n def source_dictionary(self):\n return self.dictionary\n\n @property\n def target_dictionary(self):\n return self.dictionary\n" ]
[ [ "numpy.maximum.reduce" ] ]
YiyiLiao/deep_marching_cubes
[ "6fce0b26d110a6c839b6d46ea2ab67b5bdb470b2" ]
[ "marching_cube/model/cffi/functions/occupancy_to_topology.py" ]
[ "# functions/add.py\nimport torch\nfrom torch.autograd import Function\nfrom _ext import forward_utils \nif torch.cuda.is_available():\n from _ext import forward_utils_cuda \n\n\nclass OccupancyToTopology(Function):\n \"\"\" Convert the occupancy probability to topology probability\n see ../src/occupancy_to_topology.c \n ../src/occupancy_connectivity_cuda.c\n ../src/occupancy_to_topology_kernel.cu \n for more details\n \"\"\"\n def forward(self, occupancy):\n W = occupancy.size()[0]-1\n H = occupancy.size()[1]-1\n D = occupancy.size()[2]-1\n\n T = 256\n if not occupancy.is_cuda:\n topology = torch.zeros(W*H*D, T).type(torch.FloatTensor)\n forward_utils.occupancy_to_topology_forward(occupancy, topology)\n else:\n topology = torch.zeros(W*H*D, T).type(torch.FloatTensor).cuda()\n forward_utils_cuda.occupancy_to_topology_cuda_forward(occupancy, topology)\n\n self.occupancy = occupancy\n self.topology = topology \n\n return topology \n\n def backward(self, grad_output):\n if not grad_output.is_cuda:\n grad_occupancy = torch.zeros(self.occupancy.size()).type(torch.FloatTensor)\n forward_utils.occupancy_to_topology_backward(grad_output, self.occupancy, self.topology, grad_occupancy)\n else:\n grad_occupancy = torch.zeros(self.occupancy.size()).type(torch.FloatTensor).cuda()\n forward_utils_cuda.occupancy_to_topology_cuda_backward(grad_output, self.occupancy, self.topology, grad_occupancy)\n # we only need gradient on feat_points\n return grad_occupancy \n" ]
[ [ "torch.zeros", "torch.cuda.is_available" ] ]
adsar/tensorflow
[ "b4b2575ec4bf7e6da2686505f61b5f16cb9273ab" ]
[ "tensorflow/python/ops/nn_ops.py" ]
[ "# Copyright 2015 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Wrappers for primitive Neural Net (NN) Operations.\"\"\"\n\n# pylint: disable=invalid-name\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.client import graph_util\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import common_shapes\nfrom tensorflow.python.ops import gen_nn_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\n# pylint: disable=wildcard-import\nfrom tensorflow.python.ops.gen_nn_ops import *\n# pylint: enable=wildcard-import\n\n\n# Aliases for some automatically-generated names.\nlocal_response_normalization = gen_nn_ops.lrn\n\n\ndef conv2d_transpose(value, filter, output_shape, strides, padding=\"SAME\",\n name=None):\n \"\"\"The transpose of `conv2d`.\n\n This operation is sometimes called \"deconvolution\" after (Deconvolutional\n Networks)[http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf], but is\n actually the transpose (gradient) of `conv2d` rather than an actual\n deconvolution.\n\n Args:\n value: A 4-D `Tensor` of type `float` and shape\n `[batch, height, width, in_channels]`.\n filter: A 4-D `Tensor` with the same type as `value` and shape\n `[height, width, output_channels, in_channels]`. `filter`'s\n `in_channels` dimension must match that of `value`.\n output_shape: A 1-D `Tensor` representing the output shape of the\n deconvolution op.\n strides: A list of ints. The stride of the sliding window for each\n dimension of the input tensor.\n padding: A string, either `'VALID'` or `'SAME'`. 
The padding algorithm.\n name: Optional name for the returned tensor.\n\n Returns:\n A `Tensor` with the same type as `value`.\n\n Raises:\n ValueError: If input/output depth does not match `filter`'s shape, or if\n padding is other than `'VALID'` or `'SAME'`.\n \"\"\"\n with ops.op_scope([value, filter, output_shape], name,\n \"conv2d_transpose\") as name:\n value = ops.convert_to_tensor(value, name=\"value\")\n filter = ops.convert_to_tensor(filter, name=\"filter\")\n if not value.get_shape()[3].is_compatible_with(filter.get_shape()[3]):\n raise ValueError(\n \"input channels does not match filter's input channels, \"\n \"{} != {}\".format(value.get_shape()[3], filter.get_shape()[3]))\n\n output_shape_ = ops.convert_to_tensor(output_shape, name=\"output_shape\")\n if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(4)):\n raise ValueError(\"output_shape must have shape (4,), got {}\"\n .format(output_shape_.get_shape()))\n\n if isinstance(output_shape, (list, np.ndarray)):\n # output_shape's shape should be == [4] if reached this point.\n if not filter.get_shape()[2].is_compatible_with(output_shape[3]):\n raise ValueError(\n \"output_shape does not match filter's output channels, \"\n \"{} != {}\".format(output_shape[3], filter.get_shape()[2]))\n\n if padding != \"VALID\" and padding != \"SAME\":\n raise ValueError(\"padding must be either VALID or SAME:\"\n \" {}\".format(padding))\n\n return gen_nn_ops.conv2d_backprop_input(input_sizes=output_shape_,\n filter=filter,\n out_backprop=value,\n strides=strides,\n padding=padding,\n name=name)\n\n\n# pylint: disable=protected-access\ndef bias_add(value, bias, data_format=None, name=None):\n \"\"\"Adds `bias` to `value`.\n\n This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.\n Broadcasting is supported, so `value` may have any number of dimensions.\n Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the\n case where both types are quantized.\n\n Args:\n value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,\n `int16`, `int8`, or `complex64`.\n bias: A 1-D `Tensor` with size matching the last dimension of `value`.\n Must be the same type as `value` unless `value` is a quantized type,\n in which case a different quantized type may be used.\n data_format: A string. 
'NHWC' and 'NCHW\" are supported.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with the same type as `value`.\n \"\"\"\n with ops.op_scope([value, bias], name, \"BiasAdd\") as name:\n value = ops.convert_to_tensor(value, name=\"input\")\n bias = ops.convert_to_tensor(bias, dtype=value.dtype, name=\"bias\")\n return gen_nn_ops._bias_add(value, bias, data_format=data_format, name=name)\n\nops.RegisterShape(\"BiasAdd\")(common_shapes.bias_add_shape)\n\n\nops.RegisterShape(\"BiasAddGrad\")(common_shapes.bias_add_grad_shape)\n\n\n# pylint: disable=protected-access\ndef bias_add_v1(value, bias, name=None):\n \"\"\"Adds `bias` to `value`.\n\n This is a deprecated version of bias_add and will soon to be removed.\n\n This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.\n Broadcasting is supported, so `value` may have any number of dimensions.\n Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the\n case where both types are quantized.\n\n Args:\n value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,\n `int16`, `int8`, or `complex64`.\n bias: A 1-D `Tensor` with size matching the last dimension of `value`.\n Must be the same type as `value` unless `value` is a quantized type,\n in which case a different quantized type may be used.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with the same type as `value`.\n \"\"\"\n with ops.op_scope([value, bias], name, \"BiasAddV1\") as name:\n value = ops.convert_to_tensor(value, name=\"input\")\n bias = ops.convert_to_tensor(bias, dtype=value.dtype, name=\"bias\")\n return gen_nn_ops._bias_add_v1(value, bias, name=name)\n\n\nops.RegisterShape(\"BiasAddV1\")(common_shapes.bias_add_shape)\n\n\nops.RegisterShape(\"BiasAddGradV1\")(common_shapes.bias_add_grad_shape)\n\n\n\ndef relu6(features, name=None):\n \"\"\"Computes Rectified Linear 6: `min(max(features, 0), 6)`.\n\n Args:\n features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,\n `int16`, or `int8`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with the same type as `features`.\n \"\"\"\n with ops.op_scope([features], name, \"Relu6\") as name:\n features = ops.convert_to_tensor(features, name=\"features\")\n return gen_nn_ops._relu6(features, name=name)\n\n\ndef softmax_cross_entropy_with_logits(logits, labels, name=None):\n \"\"\"Computes softmax cross entropy between `logits` and `labels`.\n\n Measures the probability error in discrete classification tasks in which the\n classes are mutually exclusive (each entry is in exactly one class). For\n example, each CIFAR-10 image is labeled with one and only one label: an image\n can be a dog or a truck, but not both.\n\n **NOTE:** While the classes are mutually exclusive, their probabilities\n need not be. All that is required is that each row of `labels` is\n a valid probability distribution. If using exclusive `labels`\n (wherein one and only one class is true at a time), see\n `sparse_softmax_cross_entropy_with_logits`.\n\n **WARNING:** This op expects unscaled logits, since it performs a `softmax`\n on `logits` internally for efficiency. 
Do not call this op with the\n output of `softmax`, as it will produce incorrect results.\n\n `logits` and `labels` must have the same shape `[batch_size, num_classes]`\n and the same dtype (either `float32` or `float64`).\n\n Args:\n logits: Unscaled log probabilities.\n labels: Each row `labels[i]` must be a valid probability distribution.\n name: A name for the operation (optional).\n\n Returns:\n A 1-D `Tensor` of length `batch_size` of the same type as `logits` with the\n softmax cross entropy loss.\n \"\"\"\n # The second output tensor contains the gradients. We use it in\n # _CrossEntropyGrad() in nn_grad but not here.\n cost, unused_backprop = gen_nn_ops._softmax_cross_entropy_with_logits(\n logits, labels, name=name)\n return cost\n\n\ndef sparse_softmax_cross_entropy_with_logits(logits, labels, name=None):\n \"\"\"Computes sparse softmax cross entropy between `logits` and `labels`.\n\n Measures the probability error in discrete classification tasks in which the\n classes are mutually exclusive (each entry is in exactly one class). For\n example, each CIFAR-10 image is labeled with one and only one label: an image\n can be a dog or a truck, but not both.\n\n **NOTE:** For this operation, the probability of a given label is considered\n exclusive. That is, soft classes are not allowed, and the `labels` vector\n must provide a single specific index for the true class for each row of\n `logits` (each minibatch entry). For soft softmax classification with\n a probability distribution for each entry, see\n `softmax_cross_entropy_with_logits`.\n\n **WARNING:** This op expects unscaled logits, since it performs a `softmax`\n on `logits` internally for efficiency. Do not call this op with the\n output of `softmax`, as it will produce incorrect results.\n\n `logits` and must have the shape `[batch_size, num_classes]`\n and the dtype (either `float32` or `float64`).\n\n `labels` must have the shape `[batch_size]` and the dtype `int64`.\n\n Args:\n logits: Unscaled log probabilities.\n labels: Each entry `labels[i]` must be an index in `[0, num_classes)`.\n name: A name for the operation (optional).\n\n Returns:\n A 1-D `Tensor` of length `batch_size` of the same type as `logits` with the\n softmax cross entropy loss.\n \"\"\"\n # The second output tensor contains the gradients. 
We use it in\n # _CrossEntropyGrad() in nn_grad but not here.\n cost, unused_backprop = gen_nn_ops._sparse_softmax_cross_entropy_with_logits(\n logits, labels, name=name)\n return cost\n\n\[email protected](\"SparseSoftmaxCrossEntropyWithLogits\")\ndef _SparseSoftmaxCrossEntropyWithLogitsShape(op):\n \"\"\"Shape function for SparseSoftmaxCrossEntropyWithLogits op.\"\"\"\n logits_shape = op.inputs[0].get_shape()\n input_shape = logits_shape.with_rank(2)\n batch_size = input_shape[0]\n # labels_shape\n op.inputs[1].get_shape().merge_with(tensor_shape.vector(batch_size))\n return [tensor_shape.vector(batch_size.value), input_shape]\n\n\[email protected](\"SoftmaxCrossEntropyWithLogits\")\ndef _SoftmaxCrossEntropyWithLogitsShape(op):\n \"\"\"Shape function for SoftmaxCrossEntropyWithLogits op.\"\"\"\n logits_shape = op.inputs[0].get_shape()\n labels_shape = op.inputs[1].get_shape()\n input_shape = logits_shape.merge_with(labels_shape).with_rank(2)\n batch_size = input_shape[0]\n return [tensor_shape.vector(batch_size.value), input_shape]\n\n\ndef avg_pool(value, ksize, strides, padding, data_format=\"NHWC\", name=None):\n \"\"\"Performs the average pooling on the input.\n\n Each entry in `output` is the mean of the corresponding size `ksize`\n window in `value`.\n\n Args:\n value: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type\n `float32`, `float64`, `qint8`, `quint8`, or `qint32`.\n ksize: A list of ints that has length >= 4.\n The size of the window for each dimension of the input tensor.\n strides: A list of ints that has length >= 4.\n The stride of the sliding window for each dimension of the\n input tensor.\n padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.\n data_format: A string. 'NHWC' and 'NCHW\" are supported.\n name: Optional name for the operation.\n\n Returns:\n A `Tensor` with the same type as `value`. The average pooled output tensor.\n \"\"\"\n with ops.op_scope([value], name, \"AvgPool\") as name:\n value = ops.convert_to_tensor(value, name=\"input\")\n return gen_nn_ops._avg_pool(value, ksize=ksize, strides=strides,\n padding=padding,\n data_format=data_format,\n name=name)\n\n\ndef max_pool(value, ksize, strides, padding, data_format=\"NHWC\", name=None):\n \"\"\"Performs the max pooling on the input.\n\n Args:\n value: A 4-D `Tensor` with shape `[batch, height, width, channels]` and\n type `tf.float32`.\n ksize: A list of ints that has length >= 4. The size of the window for\n each dimension of the input tensor.\n strides: A list of ints that has length >= 4. The stride of the sliding\n window for each dimension of the input tensor.\n padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.\n data_format: A string. 'NHWC' and 'NCHW\" are supported.\n name: Optional name for the operation.\n\n Returns:\n A `Tensor` with type `tf.float32`. 
The max pooled output tensor.\n \"\"\"\n with ops.op_scope([value], name, \"MaxPool\") as name:\n value = ops.convert_to_tensor(value, name=\"input\")\n return gen_nn_ops._max_pool(value, ksize=ksize, strides=strides,\n padding=padding,\n data_format=data_format,\n name=name)\n\n\nops.RegisterShape(\"Relu\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"Relu6\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"Elu\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"Softplus\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"Softsign\")(common_shapes.unchanged_shape)\n\n\[email protected](\"ReluGrad\")\[email protected](\"Relu6Grad\")\[email protected](\"EluGrad\")\[email protected](\"SoftplusGrad\")\[email protected](\"SoftsignGrad\")\ndef _BinaryElementwiseShape(op):\n \"\"\"Returns same shape as both inputs to op.\n\n Args:\n op: Input operation.\n\n Returns:\n Shape of both inputs to `op`.\n \"\"\"\n return [op.inputs[0].get_shape().merge_with(op.inputs[1].get_shape())]\n\n\nops.RegisterShape(\"L2Loss\")(common_shapes.scalar_shape)\n\n\nops.RegisterShape(\"LRN\")(common_shapes.unchanged_shape_with_rank(4))\n\n\[email protected](\"LRNGrad\")\ndef _LRNGradShape(op):\n \"\"\"Shape function for LRNGrad op.\"\"\"\n in_grads_shape = op.inputs[0].get_shape().with_rank(4)\n in_image_shape = op.inputs[1].get_shape().with_rank(4)\n out_image_shape = op.inputs[2].get_shape().with_rank(4)\n return [in_grads_shape.merge_with(in_image_shape).merge_with(out_image_shape)]\n\n\nops.RegisterShape(\"Softmax\")(\n common_shapes.unchanged_shape_with_rank(2))\n\n\[email protected](\"InTopK\")\ndef _InTopKShape(op):\n \"\"\"Shape function for InTopK op.\"\"\"\n predictions_shape = op.inputs[0].get_shape().with_rank(2)\n targets_shape = op.inputs[1].get_shape().with_rank(1)\n batch_size = predictions_shape[0].merge_with(targets_shape[0])\n return [tensor_shape.vector(batch_size.value)]\n\n\[email protected](\"TopK\")\[email protected](\"TopKV2\")\ndef _TopKShape(op):\n \"\"\"Shape function for TopK and TopKV2 ops.\"\"\"\n input_shape = op.inputs[0].get_shape().with_rank_at_least(1)\n if len(op.inputs) >= 2:\n k = tensor_util.constant_value(op.inputs[1])\n else:\n k = op.get_attr(\"k\")\n last = input_shape[-1].value\n if last is not None and k is not None and last < k:\n raise ValueError(\"input.shape %s must have last dimension >= k = %d\" %\n (input_shape, k))\n output_shape = input_shape[:-1].concatenate([k])\n return [output_shape, output_shape]\n\n\[email protected](\"BatchNormWithGlobalNormalization\")\ndef _BatchNormShape(op):\n \"\"\"Shape function for BatchNormWithGlobalNormalization op.\"\"\"\n input_shape = op.inputs[0].get_shape().with_rank(4)\n mean_shape = op.inputs[1].get_shape().with_rank(1)\n var_shape = op.inputs[2].get_shape().with_rank(1)\n beta_shape = op.inputs[3].get_shape().with_rank(1)\n gamma_shape = op.inputs[4].get_shape().with_rank(1)\n mean_shape[0].merge_with(input_shape[3])\n var_shape[0].merge_with(input_shape[3])\n beta_shape[0].merge_with(input_shape[3])\n gamma_shape[0].merge_with(input_shape[3])\n return [input_shape]\n\n\[email protected](\"BatchNormWithGlobalNormalizationGrad\")\ndef _BatchNormGradShape(op):\n \"\"\"Shape function for BatchNormWithGlobalNormalizationGrad op.\"\"\"\n input_shape = op.inputs[0].get_shape().with_rank(4)\n mean_shape = op.inputs[1].get_shape().with_rank(1)\n var_shape = op.inputs[2].get_shape().with_rank(1)\n beta_shape = op.inputs[3].get_shape().with_rank(1)\n out_backprop_shape = op.inputs[4].get_shape().with_rank(4)\n 
input_shape = input_shape.merge_with(out_backprop_shape)\n vector_dim = input_shape[3]\n vector_dim = vector_dim.merge_with(mean_shape[0])\n vector_dim = vector_dim.merge_with(var_shape[0])\n vector_dim = vector_dim.merge_with(beta_shape[0])\n return [input_shape] + ([tensor_shape.vector(vector_dim)] * 4)\n\n\nops.RegisterShape(\"Conv2D\")(common_shapes.conv2d_shape)\nops.RegisterShape(\"DepthwiseConv2dNative\")(\n common_shapes.depthwise_conv2d_native_shape)\nops.RegisterShape(\"AvgPool\")(common_shapes.avg_pool_shape)\nops.RegisterShape(\"MaxPool\")(common_shapes.max_pool_shape)\n\n\[email protected](\"MaxPoolWithArgmax\")\ndef _MaxPoolWithArgMaxShape(op):\n \"\"\"Shape function for MaxPoolWithArgmax op.\"\"\"\n return common_shapes.max_pool_shape(op) * 2\n\n\[email protected](\"AvgPoolGrad\")\ndef _AvgPoolGradShape(op):\n \"\"\"Shape function for the AvgPoolGrad op.\"\"\"\n orig_input_shape = tensor_util.constant_value(op.inputs[0])\n if orig_input_shape is not None:\n return [tensor_shape.TensorShape(orig_input_shape.tolist())]\n else:\n # NOTE(mrry): We could in principle work out the shape from the\n # gradients and the attrs, but if we do not know orig_input_shape\n # statically, then we are unlikely to know the shape of the\n # gradients either.\n return [tensor_shape.unknown_shape(ndims=4)]\n\n\[email protected](\"Conv2DBackpropFilter\")\ndef _Conv2DBackpropFilterShape(op):\n \"\"\"Shape function for the Conv2DBackpropFilter op.\"\"\"\n filter_shape = tensor_util.constant_value(op.inputs[1])\n if filter_shape is not None:\n return [tensor_shape.TensorShape(filter_shape.tolist())]\n else:\n # NOTE(mrry): We could in principle work out the shape from the\n # gradients and the attrs, but if we do not know filter_shape\n # statically, then we are unlikely to know the shape of the\n # gradients either.\n return [tensor_shape.unknown_shape(ndims=4)]\n\n\[email protected](\"Conv2DBackpropInput\")\ndef _Conv2DBackpropInputShape(op):\n \"\"\"Shape function for the Conv2DBackpropInput op.\"\"\"\n input_shape = tensor_util.constant_value(op.inputs[0])\n if input_shape is not None:\n return [tensor_shape.TensorShape(input_shape.tolist())]\n else:\n # NOTE(mrry): We could in principle work out the shape from the\n # gradients and the attrs, but if we do not know input_shape\n # statically, then we are unlikely to know the shape of the\n # gradients either.\n return [tensor_shape.unknown_shape(ndims=4)]\n\n\[email protected](\"DepthwiseConv2dNativeBackpropFilter\")\ndef _DepthwiseConv2dNativeBackpropFilterShape(op):\n \"\"\"Shape function for the DepthwiseConv2dNativeBackpropFilter op.\"\"\"\n filter_shape = tensor_util.constant_value(op.inputs[1])\n if filter_shape is not None:\n return [tensor_shape.TensorShape(filter_shape.tolist())]\n else:\n return [tensor_shape.unknown_shape(ndims=4)]\n\n\[email protected](\"DepthwiseConv2dNativeBackpropInput\")\ndef _DepthwiseConv2dNativeBackpropInputShape(op):\n \"\"\"Shape function for the DepthwiseConv2dNativeBackpropInput op.\"\"\"\n input_shape = tensor_util.constant_value(op.inputs[0])\n if input_shape is not None:\n return [tensor_shape.TensorShape(input_shape.tolist())]\n else:\n return [tensor_shape.unknown_shape(ndims=4)]\n\n\[email protected](\"MaxPoolGrad\")\[email protected](\"MaxPoolGradWithArgmax\")\ndef _MaxPoolGradShape(op):\n \"\"\"Shape function for the MaxPoolGrad op.\"\"\"\n orig_input_shape = op.inputs[0].get_shape().with_rank(4)\n return [orig_input_shape]\n\n\[email protected](\"Conv2D\", \"flops\")\ndef _calc_conv_flops(graph, 
node):\n \"\"\"Calculates the compute resources needed for Conv2D.\"\"\"\n input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])\n input_shape.assert_is_fully_defined()\n filter_shape = graph_util.tensor_shape_from_node_def_name(graph,\n node.input[1])\n filter_shape.assert_is_fully_defined()\n output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)\n output_shape.assert_is_fully_defined()\n filter_height = int(filter_shape[0])\n filter_width = int(filter_shape[1])\n filter_in_depth = int(filter_shape[2])\n output_count = np.prod(output_shape.as_list())\n return ops.OpStats(\"flops\", (output_count * filter_in_depth * filter_height *\n filter_width * 2))\n\n\[email protected](\"Conv2D\", \"weight_parameters\")\ndef _calc_conv_weight_params(graph, node):\n \"\"\"Calculates the on-disk size of the weights for Conv2D.\"\"\"\n input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])\n input_shape.assert_is_fully_defined()\n filter_shape = graph_util.tensor_shape_from_node_def_name(graph,\n node.input[1])\n filter_shape.assert_is_fully_defined()\n output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)\n output_shape.assert_is_fully_defined()\n filter_height = int(filter_shape[0])\n filter_width = int(filter_shape[1])\n filter_in_depth = int(filter_shape[2])\n filter_out_depth = int(filter_shape[3])\n return ops.OpStats(\"weight_parameters\", (filter_height * filter_width *\n filter_in_depth * filter_out_depth))\n\n\[email protected](\"BiasAdd\", \"flops\")\ndef _calc_bias_add_flops(graph, node):\n \"\"\"Calculates the computing needed for BiasAdd.\"\"\"\n input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])\n input_shape.assert_is_fully_defined()\n input_count = np.prod(input_shape.as_list())\n return ops.OpStats(\"flops\", input_count)\n\n\[email protected](\"BiasAdd\", \"weight_parameters\")\ndef _calc_bias_add_weight_params(graph, node):\n \"\"\"Calculates the on-disk weight parameters for BiasAdd.\"\"\"\n bias_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[1])\n bias_shape.assert_is_fully_defined()\n bias_count = np.prod(bias_shape.as_list())\n return ops.OpStats(\"weight_parameters\", bias_count)\n\n\ndef xw_plus_b(x, weights, biases, name=None): # pylint: disable=invalid-name\n \"\"\"Computes matmul(x, weights) + biases.\n\n Args:\n x: a 2D tensor. Dimensions typically: batch, in_units\n weights: a 2D tensor. Dimensions typically: in_units, out_units\n biases: a 1D tensor. Dimensions: out_units\n name: A name for the operation (optional). If not specified\n \"xw_plus_b\" is used.\n\n Returns:\n A 2-D Tensor computing matmul(x, weights) + biases.\n Dimensions typically: batch, out_units.\n \"\"\"\n with ops.op_scope([x, weights, biases], name, \"xw_plus_b\") as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n weights = ops.convert_to_tensor(weights, name=\"weights\")\n biases = ops.convert_to_tensor(biases, name=\"biases\")\n mm = math_ops.matmul(x, weights)\n return bias_add(mm, biases, name=name)\n\n\ndef xw_plus_b_v1(x, weights, biases, name=None): # pylint: disable=invalid-name\n \"\"\"Computes matmul(x, weights) + biases.\n\n This is a deprecated version of that will soon be removed.\n\n Args:\n x: a 2D tensor. Dimensions typically: batch, in_units\n weights: a 2D tensor. Dimensions typically: in_units, out_units\n biases: a 1D tensor. Dimensions: out_units\n name: A name for the operation (optional). 
If not specified\n \"xw_plus_b_v1\" is used.\n\n Returns:\n A 2-D Tensor computing matmul(x, weights) + biases.\n Dimensions typically: batch, out_units.\n \"\"\"\n with ops.op_scope([x, weights, biases], name, \"xw_plus_b_v1\") as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n weights = ops.convert_to_tensor(weights, name=\"weights\")\n biases = ops.convert_to_tensor(biases, name=\"biases\")\n mm = math_ops.matmul(x, weights)\n return bias_add_v1(mm, biases, name=name)\n\n\n# pylint: disable=invalid-name\ndef dropout(x, keep_prob, noise_shape=None, seed=None, name=None):\n \"\"\"Computes dropout.\n\n With probability `keep_prob`, outputs the input element scaled up by\n `1 / keep_prob`, otherwise outputs `0`. The scaling is so that the expected\n sum is unchanged.\n\n By default, each element is kept or dropped independently. If `noise_shape`\n is specified, it must be\n [broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`\n will make independent decisions. For example, if `shape(x) = [k, l, m, n]`\n and `noise_shape = [k, 1, 1, n]`, each batch and channel component will be\n kept independently and each row and column will be kept or not kept together.\n\n Args:\n x: A tensor.\n keep_prob: A scalar `Tensor` with the same type as x. The probability\n that each element is kept.\n noise_shape: A 1-D `Tensor` of type `int32`, representing the\n shape for randomly generated keep/drop flags.\n seed: A Python integer. Used to create random seeds. See\n [`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)\n for behavior.\n name: A name for this operation (optional).\n\n Returns:\n A Tensor of the same shape of `x`.\n\n Raises:\n ValueError: If `keep_prob` is not in `(0, 1]`.\n \"\"\"\n with ops.op_scope([x], name, \"dropout\") as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n if isinstance(keep_prob, float) and not 0 < keep_prob <= 1:\n raise ValueError(\"keep_prob must be a scalar tensor or a float in the \"\n \"range (0, 1], got %g\" % keep_prob)\n keep_prob = ops.convert_to_tensor(\n keep_prob, dtype=x.dtype, name=\"keep_prob\")\n keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())\n\n noise_shape = noise_shape or array_ops.shape(x)\n # uniform [keep_prob, 1.0 + keep_prob)\n random_tensor = keep_prob\n random_tensor += random_ops.random_uniform(\n noise_shape, seed=seed, dtype=x.dtype)\n # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)\n binary_tensor = math_ops.floor(random_tensor)\n ret = x * math_ops.inv(keep_prob) * binary_tensor\n ret.set_shape(x.get_shape())\n return ret\n\n\ndef top_k(input, k=1, sorted=True, name=None):\n \"\"\"Finds values and indices of the `k` largest entries for the last dimension.\n\n If the input is a vector (rank-1), finds the `k` largest entries in the vector\n and outputs their values and indices as vectors. Thus `values[j]` is the\n `j`-th largest entry in `input`, and its index is `indices[j]`.\n\n For matrices (resp. higher rank input), computes the top `k` entries in each\n row (resp. vector along the last dimension). Thus,\n\n values.shape = indices.shape = input.shape[:-1] + [k]\n\n If two elements are equal, the lower-index element appears first.\n\n Args:\n input: 1-D or higher `Tensor` with last dimension at least `k`.\n k: 0-D `int32` `Tensor`. 
Number of top elements to look for along the last\n dimension (along each row for matrices).\n sorted: If true the resulting `k` elements will be sorted by the values in\n descending order.\n name: Optional name for the operation.\n\n Returns:\n values: The `k` largest elements along each last dimensional slice.\n indices: The indices of `values` within the last dimension of `input`.\n \"\"\"\n return gen_nn_ops._top_kv2(input, k=k, sorted=sorted, name=name)\n\n\n# pylint: enable=invalid-name\n" ]
[ [ "tensorflow.python.framework.tensor_shape.unknown_shape", "tensorflow.python.ops.gen_nn_ops._top_kv2", "tensorflow.python.ops.common_shapes.unchanged_shape_with_rank", "tensorflow.python.ops.common_shapes.max_pool_shape", "tensorflow.python.ops.gen_nn_ops._sparse_softmax_cross_entropy_with_logits", "tensorflow.python.framework.ops.RegisterStatistics", "tensorflow.python.ops.math_ops.matmul", "tensorflow.python.framework.tensor_shape.scalar", "tensorflow.python.ops.gen_nn_ops._max_pool", "tensorflow.python.framework.ops.OpStats", "tensorflow.python.ops.gen_nn_ops.conv2d_backprop_input", "tensorflow.python.framework.tensor_util.constant_value", "tensorflow.python.ops.random_ops.random_uniform", "tensorflow.python.ops.gen_nn_ops._bias_add_v1", "tensorflow.python.framework.ops.RegisterShape", "tensorflow.python.client.graph_util.tensor_shape_from_node_def_name", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.math_ops.inv", "tensorflow.python.ops.gen_nn_ops._relu6", "tensorflow.python.framework.ops.op_scope", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.math_ops.floor", "tensorflow.python.ops.gen_nn_ops._bias_add", "tensorflow.python.ops.gen_nn_ops._avg_pool", "tensorflow.python.framework.tensor_shape.vector", "tensorflow.python.ops.gen_nn_ops._softmax_cross_entropy_with_logits" ] ]
JayKimBravekjh/deepchem
[ "842dd48ee065bee1034754540569f946cbb579eb" ]
[ "deepchem/molnet/load_function/pdbbind_datasets.py" ]
[ "\"\"\"\nPDBBind dataset loader.\n\"\"\"\nimport logging\nimport multiprocessing\nimport os\nimport re\nimport time\n\nimport deepchem\nimport numpy as np\nimport pandas as pd\nimport tarfile\nfrom deepchem.feat import RdkitGridFeaturizer\nfrom deepchem.feat import ComplexNeighborListFragmentAtomicCoordinates\nfrom deepchem.feat.graph_features import AtomicConvFeaturizer\n\nlogger = logging.getLogger(__name__)\nDEFAULT_DATA_DIR = deepchem.utils.data_utils.get_data_dir()\n\n\ndef featurize_pdbbind(data_dir=None, feat=\"grid\", subset=\"core\"):\n \"\"\"Featurizes pdbbind according to provided featurization\"\"\"\n tasks = [\"-logKd/Ki\"]\n data_dir = deepchem.utils.data_utils.get_data_dir()\n pdbbind_dir = os.path.join(data_dir, \"pdbbind\")\n dataset_dir = os.path.join(pdbbind_dir, \"%s_%s\" % (subset, feat))\n\n if not os.path.exists(dataset_dir):\n deepchem.utils.data_utils.download_url(\n \"https://deepchemdata.s3-us-west-1.amazonaws.com/featurized_datasets/core_grid.tar.gz\"\n )\n deepchem.utils.data_utils.download_url(\n \"https://deepchemdata.s3-us-west-1.amazonaws.com/featurized_datasets/full_grid.tar.gz\"\n )\n deepchem.utils.data_utils.download_url(\n \"https://deepchemdata.s3-us-west-1.amazonaws.com/featurized_datasets/refined_grid.tar.gz\"\n )\n if not os.path.exists(pdbbind_dir):\n os.system('mkdir ' + pdbbind_dir)\n deepchem.utils.data_utils.untargz_file(\n os.path.join(data_dir, 'core_grid.tar.gz'), pdbbind_dir)\n deepchem.utils.data_utils.untargz_file(\n os.path.join(data_dir, 'full_grid.tar.gz'), pdbbind_dir)\n deepchem.utils.data_utils.untargz_file(\n os.path.join(data_dir, 'refined_grid.tar.gz'), pdbbind_dir)\n\n return deepchem.data.DiskDataset(dataset_dir), tasks\n\n\ndef load_pdbbind_grid(split=\"random\",\n featurizer=\"grid\",\n subset=\"core\",\n reload=True):\n \"\"\"Load PDBBind datasets. 
Does not do train/test split\"\"\"\n if featurizer == 'grid':\n dataset, tasks = featurize_pdbbind(feat=featurizer, subset=subset)\n\n splitters = {\n 'index': deepchem.splits.IndexSplitter(),\n 'random': deepchem.splits.RandomSplitter(),\n 'time': deepchem.splits.TimeSplitterPDBbind(dataset.ids)\n }\n splitter = splitters[split]\n train, valid, test = splitter.train_valid_test_split(dataset)\n\n transformers = []\n for transformer in transformers:\n train = transformer.transform(train)\n for transformer in transformers:\n valid = transformer.transform(valid)\n for transformer in transformers:\n test = transformer.transform(test)\n\n all_dataset = (train, valid, test)\n return tasks, all_dataset, transformers\n\n else:\n data_dir = deepchem.utils.data_utils.get_data_dir()\n if reload:\n save_dir = os.path.join(\n data_dir, \"pdbbind_\" + subset + \"/\" + featurizer + \"/\" + str(split))\n\n dataset_file = os.path.join(data_dir, subset + \"_smiles_labels.csv\")\n\n if not os.path.exists(dataset_file):\n deepchem.utils.data_utils.download_url(\n \"https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/\" + subset +\n \"_smiles_labels.csv\")\n\n tasks = [\"-logKd/Ki\"]\n if reload:\n loaded, all_dataset, transformers = deepchem.utils.data_utils.load_dataset_from_disk(\n save_dir)\n if loaded:\n return tasks, all_dataset, transformers\n\n if featurizer == 'ECFP':\n featurizer = deepchem.feat.CircularFingerprint(size=1024)\n elif featurizer == 'GraphConv':\n featurizer = deepchem.feat.ConvMolFeaturizer()\n elif featurizer == 'Weave':\n featurizer = deepchem.feat.WeaveFeaturizer()\n elif featurizer == 'Raw':\n featurizer = deepchem.feat.RawFeaturizer()\n\n loader = deepchem.data.CSVLoader(\n tasks=tasks, smiles_field=\"smiles\", featurizer=featurizer)\n dataset = loader.featurize(dataset_file, shard_size=8192)\n df = pd.read_csv(dataset_file)\n\n if split == None:\n transformers = [\n deepchem.trans.NormalizationTransformer(\n transform_y=True, dataset=dataset)\n ]\n\n logger.info(\"Split is None, about to transform data.\")\n for transformer in transformers:\n dataset = transformer.transform(dataset)\n return tasks, (dataset, None, None), transformers\n\n splitters = {\n 'index': deepchem.splits.IndexSplitter(),\n 'random': deepchem.splits.RandomSplitter(),\n 'scaffold': deepchem.splits.ScaffoldSplitter(),\n }\n splitter = splitters[split]\n logger.info(\"About to split dataset with {} splitter.\".format(split))\n train, valid, test = splitter.train_valid_test_split(dataset)\n\n transformers = [\n deepchem.trans.NormalizationTransformer(\n transform_y=True, dataset=train)\n ]\n\n logger.info(\"About to transform dataset.\")\n for transformer in transformers:\n train = transformer.transform(train)\n valid = transformer.transform(valid)\n test = transformer.transform(test)\n\n if reload:\n deepchem.utils.data_utils.save_dataset_to_disk(save_dir, train, valid,\n test, transformers)\n\n return tasks, (train, valid, test), transformers\n\n\ndef load_pdbbind(reload=True,\n data_dir=None,\n subset=\"core\",\n load_binding_pocket=False,\n featurizer=\"grid\",\n split=\"random\",\n split_seed=None,\n save_dir=None,\n save_timestamp=False):\n \"\"\"Load raw PDBBind dataset by featurization and split.\n\n Parameters\n ----------\n reload: Bool, optional\n Reload saved featurized and splitted dataset or not.\n data_dir: Str, optional\n Specifies the directory storing the raw dataset.\n load_binding_pocket: Bool, optional\n Load binding pocket or full protein.\n subset: Str\n Specifies which subset of 
PDBBind, only \"core\" or \"refined\" for now.\n featurizer: Str\n Either \"grid\" or \"atomic\" for grid and atomic featurizations.\n split: Str\n Either \"random\" or \"index\".\n split_seed: Int, optional\n Specifies the random seed for splitter.\n save_dir: Str, optional\n Specifies the directory to store the featurized and splitted dataset when\n reload is False. If reload is True, it will load saved dataset inside save_dir.\n save_timestamp: Bool, optional\n Save featurized and splitted dataset with timestamp or not. Set it as True\n when running similar or same jobs simultaneously on multiple compute nodes.\n \"\"\"\n\n pdbbind_tasks = [\"-logKd/Ki\"]\n\n deepchem_dir = deepchem.utils.data_utils.get_data_dir()\n\n if data_dir == None:\n data_dir = DEFAULT_DATA_DIR\n data_folder = os.path.join(data_dir, \"pdbbind\", \"v2015\")\n\n if save_dir == None:\n save_dir = os.path.join(DEFAULT_DATA_DIR, \"from-pdbbind\")\n if load_binding_pocket:\n save_folder = os.path.join(\n save_dir, \"protein_pocket-%s-%s-%s\" % (subset, featurizer, split))\n else:\n save_folder = os.path.join(\n save_dir, \"full_protein-%s-%s-%s\" % (subset, featurizer, split))\n\n if save_timestamp:\n save_folder = \"%s-%s-%s\" % (save_folder,\n time.strftime(\"%Y%m%d\", time.localtime()),\n re.search(\"\\.(.*)\", str(time.time())).group(1))\n\n if reload:\n if not os.path.exists(save_folder):\n print(\n \"Dataset does not exist at {}. Reconstructing...\".format(save_folder))\n else:\n print(\n \"\\nLoading featurized and splitted dataset from:\\n%s\\n\" % save_folder)\n loaded, all_dataset, transformers = deepchem.utils.data_utils.load_dataset_from_disk(\n save_folder)\n if loaded:\n return pdbbind_tasks, all_dataset, transformers\n\n dataset_file = os.path.join(data_dir, \"pdbbind_v2015.tar.gz\")\n if not os.path.exists(dataset_file):\n logger.warning(\"About to download PDBBind full dataset. 
Large file, 2GB\")\n deepchem.utils.data_utils.download_url(\n \"https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/pdbbind_v2015.tar.gz\",\n dest_dir=data_dir)\n if os.path.exists(data_folder):\n logger.info(\"PDBBind full dataset already exists.\")\n else:\n print(\"Untarring full dataset...\")\n deepchem.utils.data_utils.untargz_file(\n dataset_file, dest_dir=os.path.join(data_dir, \"pdbbind\"))\n\n print(\"\\nRaw dataset:\\n%s\" % data_folder)\n print(\"\\nFeaturized and splitted dataset:\\n%s\" % save_folder)\n\n if subset == \"core\":\n index_labels_file = os.path.join(data_folder, \"INDEX_core_data.2013\")\n elif subset == \"refined\":\n index_labels_file = os.path.join(data_folder, \"INDEX_refined_data.2015\")\n else:\n raise ValueError(\"Other subsets not supported\")\n\n # Extract locations of data\n with open(index_labels_file, \"r\") as g:\n pdbs = [line[:4] for line in g.readlines() if line[0] != \"#\"]\n if load_binding_pocket:\n protein_files = [\n os.path.join(data_folder, pdb, \"%s_pocket.pdb\" % pdb) for pdb in pdbs\n ]\n else:\n protein_files = [\n os.path.join(data_folder, pdb, \"%s_protein.pdb\" % pdb) for pdb in pdbs\n ]\n ligand_files = [\n os.path.join(data_folder, pdb, \"%s_ligand.sdf\" % pdb) for pdb in pdbs\n ]\n\n # Extract labels\n with open(index_labels_file, \"r\") as g:\n labels = np.array([\n # Lines have format\n # PDB code, resolution, release year, -logKd/Ki, Kd/Ki, reference, ligand name\n # The base-10 logarithm, -log kd/pk\n float(line.split()[3]) for line in g.readlines() if line[0] != \"#\"\n ])\n\n # Featurize Data\n if featurizer == \"grid\":\n featurizer = RdkitGridFeaturizer(\n voxel_width=2.0,\n feature_types=[\n 'ecfp', 'splif', 'hbond', 'salt_bridge', 'pi_stack', 'cation_pi',\n 'charge'\n ],\n flatten=True)\n elif featurizer == \"atomic\" or featurizer == \"atomic_conv\":\n # Pulled from PDB files. 
For larger datasets with more PDBs, would use\n # max num atoms instead of exact.\n frag1_num_atoms = 70 # for ligand atoms\n if load_binding_pocket:\n frag2_num_atoms = 1000\n complex_num_atoms = 1070\n else:\n frag2_num_atoms = 24000 # for protein atoms\n complex_num_atoms = 24070 # in total\n max_num_neighbors = 4\n # Cutoff in angstroms\n neighbor_cutoff = 4\n if featurizer == \"atomic\":\n featurizer = ComplexNeighborListFragmentAtomicCoordinates(\n frag1_num_atoms=frag1_num_atoms,\n frag2_num_atoms=frag2_num_atoms,\n complex_num_atoms=complex_num_atoms,\n max_num_neighbors=max_num_neighbors,\n neighbor_cutoff=neighbor_cutoff)\n if featurizer == \"atomic_conv\":\n featurizer = AtomicConvFeaturizer(\n labels=labels,\n frag1_num_atoms=frag1_num_atoms,\n frag2_num_atoms=frag2_num_atoms,\n complex_num_atoms=complex_num_atoms,\n neighbor_cutoff=neighbor_cutoff,\n max_num_neighbors=max_num_neighbors,\n batch_size=64)\n else:\n raise ValueError(\"Featurizer not supported\")\n\n print(\"\\nFeaturizing Complexes for \\\"%s\\\" ...\\n\" % data_folder)\n feat_t1 = time.time()\n features, failures = featurizer.featurize(ligand_files, protein_files)\n feat_t2 = time.time()\n print(\"\\nFeaturization finished, took %0.3f s.\" % (feat_t2 - feat_t1))\n\n # Delete labels and ids for failing elements\n labels = np.delete(labels, failures)\n labels = labels.reshape((len(labels), 1))\n ids = np.delete(pdbs, failures)\n\n print(\"\\nConstruct dataset excluding failing featurization elements...\")\n dataset = deepchem.data.DiskDataset.from_numpy(features, y=labels, ids=ids)\n\n # No transformations of data\n transformers = []\n\n # Split dataset\n print(\"\\nSplit dataset...\\n\")\n if split == None:\n return pdbbind_tasks, (dataset, None, None), transformers\n\n # TODO(rbharath): This should be modified to contain a cluster split so\n # structures of the same protein aren't in both train/test\n splitters = {\n 'index': deepchem.splits.IndexSplitter(),\n 'random': deepchem.splits.RandomSplitter(),\n }\n splitter = splitters[split]\n train, valid, test = splitter.train_valid_test_split(dataset, seed=split_seed)\n\n all_dataset = (train, valid, test)\n print(\"\\nSaving dataset to \\\"%s\\\" ...\" % save_folder)\n deepchem.utils.data_utils.save_dataset_to_disk(save_folder, train, valid,\n test, transformers)\n return pdbbind_tasks, all_dataset, transformers\n\n\ndef load_pdbbind_from_dir(data_folder,\n index_files,\n featurizer=\"grid\",\n split=\"random\",\n ex_ids=[],\n save_dir=None):\n \"\"\"Load and featurize raw PDBBind dataset from a local directory with the option to avoid certain IDs.\n\n Parameters\n ----------\n data_dir: String,\n Specifies the data directory to store the featurized dataset.\n index_files: List\n List of data and labels index file paths relative to the path in data_dir\n split: Str\n Either \"random\" or \"index\"\n feat: Str\n Either \"grid\" or \"atomic\" for grid and atomic featurizations.\n subset: Str\n Only \"core\" or \"refined\" for now.\n ex_ids: List\n List of PDB IDs to avoid loading if present\n save_dir: String\n Path to store featurized datasets\n \"\"\"\n pdbbind_tasks = [\"-logKd/Ki\"]\n\n index_file = os.path.join(data_folder, index_files[0])\n labels_file = os.path.join(data_folder, index_files[1])\n\n # Extract locations of data\n pdbs = []\n\n with open(index_file, \"r\") as g:\n lines = g.readlines()\n for line in lines:\n line = line.split(\" \")\n pdb = line[0]\n if len(pdb) == 4:\n pdbs.append(pdb)\n protein_files = [\n os.path.join(data_folder, pdb, 
\"%s_protein.pdb\" % pdb)\n for pdb in pdbs\n if pdb not in ex_ids\n ]\n ligand_files = [\n os.path.join(data_folder, pdb, \"%s_ligand.sdf\" % pdb)\n for pdb in pdbs\n if pdb not in ex_ids\n ]\n # Extract labels\n labels_tmp = {}\n with open(labels_file, \"r\") as f:\n lines = f.readlines()\n for line in lines:\n # Skip comment lines\n if line[0] == \"#\":\n continue\n # Lines have format\n # PDB code, resolution, release year, -logKd/Ki, Kd/Ki, reference, ligand name\n line = line.split()\n # The base-10 logarithm, -log kd/pk\n log_label = line[3]\n labels_tmp[line[0]] = log_label\n\n labels = np.array([labels_tmp[pdb] for pdb in pdbs])\n print(labels)\n # Featurize Data\n if featurizer == \"grid\":\n featurizer = RdkitGridFeaturizer(\n voxel_width=2.0,\n feature_types=[\n 'ecfp', 'splif', 'hbond', 'salt_bridge', 'pi_stack', 'cation_pi',\n 'charge'\n ],\n flatten=True)\n elif featurizer == \"atomic\":\n # Pulled from PDB files. For larger datasets with more PDBs, would use\n # max num atoms instead of exact.\n frag1_num_atoms = 70 # for ligand atoms\n frag2_num_atoms = 24000 # for protein atoms\n complex_num_atoms = 24070 # in total\n max_num_neighbors = 4\n # Cutoff in angstroms\n neighbor_cutoff = 4\n featurizer = ComplexNeighborListFragmentAtomicCoordinates(\n frag1_num_atoms, frag2_num_atoms, complex_num_atoms, max_num_neighbors,\n neighbor_cutoff)\n\n else:\n raise ValueError(\"Featurizer not supported\")\n print(\"Featurizing Complexes\")\n features, failures = featurizer.featurize(ligand_files, protein_files)\n # Delete labels for failing elements\n labels = np.delete(labels, failures)\n dataset = deepchem.data.DiskDataset.from_numpy(features, labels)\n # No transformations of data\n transformers = []\n if split == None:\n return pdbbind_tasks, (dataset, None, None), transformers\n\n # TODO(rbharath): This should be modified to contain a cluster split so\n # structures of the same protein aren't in both train/test\n splitters = {\n 'index': deepchem.splits.IndexSplitter(),\n 'random': deepchem.splits.RandomSplitter(),\n }\n splitter = splitters[split]\n train, valid, test = splitter.train_valid_test_split(dataset)\n all_dataset = (train, valid, test)\n if save_dir:\n deepchem.utils.data_utils.save_dataset_to_disk(save_dir, train, valid, test,\n transformers)\n return pdbbind_tasks, all_dataset, transformers\n" ]
[ [ "numpy.array", "numpy.delete", "pandas.read_csv" ] ]
webclinic017/pyStock-1
[ "4ed6bf20130dcfc37d542bd5b3aec505a12f3106" ]
[ "indicators/bollinger.py" ]
[ "# Add import from parent directory possible\nimport matplotlib.pyplot as plt\nfrom helpers.DataOperations import FindIntersections, CreateSubsetByValues\nfrom core.indicator import indicator\n# Creates object\n\n\ndef CreateBollinger(prices, n=20, k=2):\n return Bollinger(prices, n, k)\n\n# Bollinger object which creates Bollinger data\n\n\nclass Bollinger(indicator):\n\n def __init__(self, close, n=20, k=2):\n indicator.__init__(self, 'Bollinger', 'momentum', close.index)\n self.n = n\n self.k = k\n self.consolidationLvl = 15 # percent\n self.variabilityLvl = 50 # percent\n self.mavg, self.upperBand, self.lowerBand = self.InitBollinger(\n close, self.n, self.k)\n self.std = self.upperBand - self.lowerBand\n self.absStd = (self.std * 100) / self.std.max()\n\n # Signals\n fromBottom, fromTop = FindIntersections(self.upperBand, close)\n self.sell = fromBottom\n fromBottom, fromTop = FindIntersections(self.lowerBand, close)\n self.buy = fromTop\n self.consolidation = CreateSubsetByValues(\n self.absStd, 0, self.consolidationLvl)\n self.variability = CreateSubsetByValues(\n self.absStd, self.variabilityLvl, 100)\n\n # Set Bollinger indicator\n @staticmethod\n def InitBollinger(prices, n=20, k=2):\n mavg = prices.rolling(window=n, min_periods=1).mean()\n std = prices.rolling(window=n, min_periods=1).std()\n upperBand = mavg + (std * 2)\n lowerBand = mavg - (std * 2)\n return mavg, upperBand, lowerBand\n\n # Export indicator signals to report\n def ExportSignals(self, reportSignals):\n reportSignals.AddDataframeSignals(self.buy, 'Bollinger', 'buy')\n reportSignals.AddDataframeSignals(self.sell, 'Bollinger', 'sell')\n\n # Plot method\n def Plot(self):\n # Get index values for the X axis for facebook DataFrame\n x_axis = self.toNumIndex(self.upperBand)\n\n # Plot shaded 21 Day Bollinger Band for Facebook\n plt.fill_between(x_axis, self.upperBand,\n self.lowerBand, color='#BBBBBB')\n plt.plot(self.toNumIndex(self.upperBand), self.upperBand, '--',\n linewidth=1.0, color='#940006', label='Sell band')\n plt.plot(self.toNumIndex(self.lowerBand), self.lowerBand, '--',\n linewidth=1.0, color='#169400', label='Buy band')\n plt.plot(self.toNumIndex(self.mavg), self.mavg, '--', linewidth=1.0,\n color='#0000FF', label=('MA %s days' % self.n))\n\n # Signals plottting\n if (self.buy is not None and self.buy.size):\n plt.plot(self.toNumIndex(self.buy), self.buy,\n 'o', color='#000000', ms=8)\n plt.plot(self.toNumIndex(self.buy), self.buy, 'o',\n label='Horiz. Buy', color='#00FF00')\n if (self.sell is not None and self.sell.size):\n plt.plot(self.toNumIndex(self.sell),\n self.sell, 'o', color='#000000', ms=8)\n plt.plot(self.toNumIndex(self.sell), self.sell, 'o',\n label='Horiz. Sell', color='#FF0000')\n\n # Plot method\n def PlotAbsDeviation(self):\n plt.plot(self.toNumIndex(self.absStd), self.absStd, linewidth=1.0,\n color='#333333', label='Bol.AbsDeviation')\n plt.ylim(top=100, bottom=0)\n if (self.consolidation is not None and self.consolidation.size):\n plt.plot(self.toNumIndex(self.consolidation), self.consolidation,\n 'o', label='Consolidation', color='cyan')\n if (self.variability is not None and self.variability.size):\n plt.plot(self.toNumIndex(self.variability), self.variability,\n 'o', label='Variability', color='magenta')\n" ]
[ [ "matplotlib.pyplot.fill_between", "matplotlib.pyplot.ylim" ] ]
RGiskard/Data-Structures-and-Algorithms
[ "045eab8e2167fa86aa48f194a7e2d621ce7f19ed" ]
[ "meetup pandas/Funciones/comandos_pandas_v2.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 1 07:45:15 2019\n\n@author: PAULA\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\n\n##########################\n# Funciones de selección #\n##########################\n\nprint('##########################')\nprint('# Funciones de selección #')\nprint('##########################')\n \n \ndf = pd.DataFrame(np.array([[1,2,3],[4,5,6],[7,8,9]]))\nprint('DataFrame: ')\nprint(df)\nprint('____________________________')\n \n#seleccionar la primera columna del DataFrame\nprint('Primera columna del DataFrame: ')\nprint(df[0])\nprint('____________________________')\n\n#seleccionar dos columnas del DataFrame\nprint('Dos columnas del DataFrame: ')\nprint(df[[0,1]])\nprint('____________________________')\n\n#Un valor de la primera fila y ultima columna del DataFrame\nprint('Valor indicando los índices del DataFrame: ')\nprint(df.iloc[0][2])\nprint('____________________________')\n\n#Usar indices del dataframe para seleccionar sus datos, \n#valores de la primera fila del dataframe\nprint('Valores de la primera fila con loc: ')\nprint(df.loc[0])\nprint('____________________________')\nprint('Valores de la primera fila con iloc: ')\nprint(df.iloc[0,:])\n\n##########################\n# Funciones de limpieza #\n##########################\n\nprint('\\n')\nprint('##########################')\nprint('# Funciones de limpieza #')\nprint('##########################')\n \n \ndf2 = pd.DataFrame({'age': [5, 6, np.NaN],\n 'born': [pd.NaT, pd.Timestamp('1939-05-27'),\n pd.Timestamp('1940-04-25')],\n 'name': ['Alfred', 'Batman', ''],\n 'toy': [None, 'Batmobile', 'Joker']})\nprint('DataFrame2: ')\nprint(df2)\nprint('____________________________')\n\n#Limpiar datos, si queremos obtener resultados confiables\n#verificar si faltan valores en el conjunto de datos\n#verdadero para valores faltantes y falso para valores no perdidos\nprint('Verificar si faltan valores en el conjunto de datos: ')\nprint(df2.isnull())\nprint('____________________________')\n\n#Eliminar datos nulos\nprint('Eliminar columnas, filas de datos nulos: ')\nprint(df2.dropna())\nprint('____________________________')\n\ndf3 = pd.DataFrame(np.array([[1,np.NaN,3],[4,5,np.NaN],[7,np.NaN,9], [4, np.NaN, 0]]))\nprint('DataFrame3: ')\nprint(df3)\nprint('____________________________')\n\n#suma de datos nulos\nprint('Suma de datos nulos: ')\nprint(df3.isnull().sum())\nprint('____________________________')\n\n#Rellenar los datos nulos con algun valor x\nprint('Rellenar con algun valor x: ')\nprint(df3.fillna(0))\nprint('____________________________')\n\n\n\n" ]
[ [ "numpy.array", "pandas.Timestamp" ] ]
timoblak/OpenAFQA
[ "dc3e4a02efac3342fc6341a2946398d19d6b7c84" ]
[ "afqa_toolbox/features/gabor.py" ]
[ "from afqa_toolbox.features import block_properties\nimport numpy as np\nimport cv2\n\n\ndef gabor_filter(theta, freq, sigma, shen=False):\n \"\"\"Produces a Gabor filter based on the provided parameters\n\n :param theta: The angle of the filter\n :param freq: The frequency of the filter\n :param sigma: The standard deviation of the gaussian envelope\n :param shen: Alternative definition of the Gabor filter by Shen et al.\n :return:\n \"\"\"\n # define range (add small eps to also include the last index\n range = np.arange(-2.5 * sigma, 2.5 * sigma + 1e-5)\n\n [x, y] = np.meshgrid(range, range)\n\n # Shen et al. define the Gabor filter a bit differently\n if shen:\n x1 = x * np.cos(theta) + y * np.sin(theta)\n y1 = -x * np.sin(theta) + y * np.cos(theta)\n else:\n x1 = x * np.sin(theta) + y * np.cos(theta)\n y1 = x * np.cos(theta) - y * np.sin(theta)\n\n return np.exp((-1/2) * ((x1 * x1) / (sigma * sigma) + (y1 * y1) / (sigma * sigma))) * \\\n np.exp(1j * 2 * np.pi * freq * x1)\n\n\nclass FeatGabor:\n \"\"\"Filters the imput image with differently oriented Gabor filters\"\"\"\n def __init__(self, blk_size, sigma=6, freq=0.1, angle_num=8):\n # Default values are suitable for fingerprint image of 500 ppi\n self.blk_size = blk_size\n self.sigma = sigma\n self.freq = freq\n self.angle_num = angle_num\n\n def gabor_stds(self, image, smooth=False, shen=False):\n \"\"\"Calculates the standard deviation of responses to differently oriented Gab filters\n\n :param image: Input image\n :param angle_num: The number of angles in half circle for which Gabor filters will be calculated\n :return:\n \"\"\"\n\n h, w = image.shape\n\n img_float = image.astype(np.float64)/255\n gauss_kernel_1 = cv2.getGaussianKernel(7, 1)\n gauss_kernel_4 = cv2.getGaussianKernel(25, 4)\n gauss_image = cv2.sepFilter2D(img_float, cv2.CV_64F, gauss_kernel_1, gauss_kernel_1)\n\n img_detail = img_float - gauss_image\n\n gauss_responses = np.zeros(shape=(h, w, self.angle_num))\n for i, angle in enumerate(range(self.angle_num)):\n theta = (np.pi*angle) / self.angle_num\n gf = gabor_filter(theta, self.freq, self.sigma, shen)\n\n # Calculate the response of Gabor filters\n response = cv2.filter2D(img_detail, cv2.CV_64F, gf.real) + 1j * cv2.filter2D(img_detail, cv2.CV_64F, gf.imag)\n magnitude = np.abs(response)\n\n # Calc Gauss of the Gabor magnitudes for smoothing\n if smooth:\n gauss_responses[:, :, i] = cv2.sepFilter2D(magnitude, cv2.CV_64F, gauss_kernel_4, gauss_kernel_4)\n else:\n gauss_responses[:, :, i] = magnitude\n\n std_local = gauss_responses.std(axis=-1, ddof=1)\n\n rows, cols = block_properties(image.shape, self.blk_size)\n return cv2.resize(std_local, (cols, rows), interpolation=cv2.INTER_AREA)\n\n\n" ]
[ [ "numpy.zeros", "numpy.abs", "numpy.cos", "numpy.exp", "numpy.arange", "numpy.sin", "numpy.meshgrid" ] ]
SunNy820828449/CINN
[ "6384f730867132508c2c60f5ff2aae12959143d7" ]
[ "python/tests/pool_utils.py" ]
[ "#!/usr/bin/env python3\n\n# Copyright (c) 2021 CINN Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\nimport numpy as np\nimport sys\n\n\ndef pool2d(np_data, attrs, dtype=\"float32\"):\n pool_type = \"max\"\n ceil_mode = False\n exclusive = True\n data_format = \"NCHW\"\n for key in attrs.attr_store:\n if key == \"kernel_size\":\n kernel_size = attrs.get_attr(\"kernel_size\")\n elif key == \"stride_size\":\n stride_size = attrs.get_attr(\"stride_size\")\n elif key == \"padding_size\":\n padding_size = attrs.get_attr(\"padding_size\")\n elif key == \"pool_type\":\n pool_type = attrs.get_attr(\"pool_type\")\n elif key == \"ceil_mode\":\n ceil_mode = attrs.get_attr(\"ceil_mode\")\n elif key == \"exclusive\":\n exclusive = attrs.get_attr(\"exclusive\")\n elif key == \"data_format\":\n data_format = attrs.get_attr(\"data_format\")\n else:\n raise ValueError(\"attr_store {} is not supported\".format(key))\n\n if data_format == \"NCHW\":\n in_n, in_c, in_h, in_w = in_shape = np_data.shape\n height_axis = 2\n width_axis = 3\n elif data_format == \"NHWC\":\n in_n, in_h, in_w, in_c = in_shape = np_data.shape\n height_axis = 1\n width_axis = 2\n else:\n raise ValueError(\"data_format {} is not supported\".format(data_format))\n\n if isinstance(kernel_size, int):\n k_h = k_w = kernel_size\n else:\n k_h, k_w = kernel_size\n if isinstance(stride_size, int):\n s_h = s_w = stride_size\n else:\n s_h, s_w = stride_size\n if isinstance(padding_size, int):\n pt = pl = pb = pr = padding_size\n else:\n pt, pl, pb, pr = padding_size\n\n out_shape = list(in_shape)\n if ceil_mode:\n out_shape[height_axis] = int(\n math.ceil(float(in_shape[height_axis] - k_h + pt + pb) / s_h) + 1)\n out_shape[width_axis] = int(\n math.ceil(float(in_shape[width_axis] - k_w + pl + pr) / s_w) + 1)\n else:\n out_shape[height_axis] = int(\n math.floor(float(in_shape[height_axis] - k_h + pt + pb) / s_h) + 1)\n out_shape[width_axis] = int(\n math.floor(float(in_shape[width_axis] - k_w + pl + pr) / s_w) + 1)\n\n fill_value = 0\n if exclusive and pool_type == 'max':\n fill_value = sys.float_info.min\n\n if data_format == \"NCHW\":\n pad_np = np.full(\n shape=(in_n, in_c, in_h + pt + pb, in_w + pl + pr),\n fill_value=fill_value,\n dtype=dtype)\n no_zero = (range(in_n), range(in_c), range(pt, in_h + pt),\n range(pl, in_w + pl))\n else:\n pad_np = np.full(\n shape=(in_n, in_h + pt + pb, in_w + pl + pr, in_c),\n fill_value=fill_value,\n dtype=dtype)\n no_zero = (range(in_n), range(pt, in_h + pt), range(pl, in_w + pl),\n range(in_c))\n\n pad_np[np.ix_(*no_zero)] = np_data\n ret_np = np.zeros(shape=out_shape).astype(dtype)\n if pool_type == 'avg':\n for i in range(out_shape[height_axis]):\n for j in range(out_shape[width_axis]):\n if exclusive:\n pad_exclusive = pad_np.copy()\n pad_exclusive[np.ix_(*no_zero)] = 1\n if data_format == \"NCHW\":\n pad_count = np.sum(\n pad_exclusive[:, :, i * s_h:i * s_h +\n k_h, j * s_w:j * s_w + k_w] == 1,\n axis=(height_axis, width_axis))\n ret_np[:, :, i, j] = 
np.sum(\n pad_np[:, :, i * s_h:i * s_h +\n k_h, j * s_w:j * s_w + k_w],\n axis=(height_axis, width_axis)) / np.maximum(\n pad_count, 1)\n else:\n pad_count = np.sum(\n pad_exclusive[:, i * s_h:i * s_h +\n k_h, j * s_w:j * s_w + k_w, :] == 1,\n axis=(height_axis, width_axis))\n ret_np[:, i, j, :] = np.sum(\n pad_np[:, i * s_h:i * s_h + k_h, j * s_w:j * s_w +\n k_w, :],\n axis=(height_axis, width_axis)) / np.maximum(\n pad_count, 1)\n else:\n if data_format == \"NCHW\":\n ret_np[:, :,i, j] = \\\n np.mean(pad_np[:, :,\n i * s_h: i * s_h + k_h,\n j * s_w: j * s_w + k_w], axis=(height_axis, width_axis))\n else:\n ret_np[:, i, j, :] = \\\n np.mean(pad_np[:,\n i * s_h: i * s_h + k_h,\n j * s_w: j * s_w + k_w, :], axis=(height_axis, width_axis))\n elif pool_type == 'max':\n for i in range(out_shape[height_axis]):\n for j in range(out_shape[width_axis]):\n if data_format == \"NCHW\":\n ret_np[:, :, i, j] = np.max(\n pad_np[:, :, i * s_h:i * s_h + k_h, j * s_w:j * s_w +\n k_w],\n axis=(height_axis, width_axis))\n else:\n ret_np[:, i, j, :] = np.max(\n pad_np[:, i * s_h:i * s_h + k_h, j * s_w:j * s_w +\n k_w, :],\n axis=(height_axis, width_axis))\n else:\n raise ValueError(\"pool type {} is not supported\".format(pool_type))\n\n ret_np = np.maximum(ret_np, fill_value)\n return ret_np, [out_shape]\n\n\ndef pool3d(np_data, attrs, dtype=\"float32\"):\n pool_type = \"max\"\n ceil_mode = False\n exclusive = True\n data_format = \"NCDHW\"\n for key in attrs.attr_store:\n if key == \"kernel_size\":\n kernel_size = attrs.get_attr(\"kernel_size\")\n elif key == \"stride_size\":\n stride_size = attrs.get_attr(\"stride_size\")\n elif key == \"padding_size\":\n padding_size = attrs.get_attr(\"padding_size\")\n elif key == \"pool_type\":\n pool_type = attrs.get_attr(\"pool_type\")\n elif key == \"ceil_mode\":\n ceil_mode = attrs.get_attr(\"ceil_mode\")\n elif key == \"exclusive\":\n exclusive = attrs.get_attr(\"exclusive\")\n elif key == \"data_format\":\n data_format = attrs.get_attr(\"data_format\")\n else:\n raise ValueError(\"attr_store {} is not supported\".format(key))\n\n if data_format == \"NCDHW\":\n in_n, in_c, in_d, in_h, in_w = in_shape = np_data.shape\n depth_axis = 2\n height_axis = 3\n width_axis = 4\n elif data_format == \"NDHWC\":\n in_n, in_d, in_h, in_w, in_c = in_shape = np_data.shape\n depth_axis = 1\n height_axis = 2\n width_axis = 3\n else:\n raise ValueError(\"data_format {} is not supported\".format(data_format))\n\n if isinstance(kernel_size, int):\n k_d = k_h = k_w = kernel_size\n else:\n k_d, k_h, k_w = kernel_size\n if isinstance(stride_size, int):\n s_d = s_h = s_w = stride_size\n else:\n s_d, s_h, s_w = stride_size\n if isinstance(padding_size, int):\n pf = pt = pl = pk = pb = pr = padding_size\n else:\n pf, pt, pl, pk, pb, pr = padding_size\n\n out_shape = list(in_shape)\n if ceil_mode:\n out_shape[depth_axis] = int(\n math.ceil(float(in_shape[depth_axis] - k_d + pf + pk) / s_d) + 1)\n out_shape[height_axis] = int(\n math.ceil(float(in_shape[height_axis] - k_h + pt + pb) / s_h) + 1)\n out_shape[width_axis] = int(\n math.ceil(float(in_shape[width_axis] - k_w + pl + pr) / s_w) + 1)\n else:\n out_shape[depth_axis] = int(\n math.floor(float(in_shape[depth_axis] - k_d + pf + pk) / s_d) + 1)\n out_shape[height_axis] = int(\n math.floor(float(in_shape[height_axis] - k_h + pt + pb) / s_h) + 1)\n out_shape[width_axis] = int(\n math.floor(float(in_shape[width_axis] - k_w + pl + pr) / s_w) + 1)\n\n fill_value = 0\n if exclusive and pool_type == 'max':\n fill_value = sys.float_info.min\n\n if 
data_format == \"NCDHW\":\n pad_np = np.full(\n shape=(in_n, in_c, in_d + pf + pk, in_h + pt + pb, in_w + pl + pr),\n fill_value=fill_value,\n dtype=dtype)\n no_zero = (range(in_n), range(in_c), range(pf, in_d + pf),\n range(pt, in_h + pt), range(pl, in_w + pl))\n else:\n pad_np = np.full(\n shape=(in_n, in_d + pf + pk, in_h + pt + pb, in_w + pl + pr, in_c),\n fill_value=fill_value,\n dtype=dtype)\n no_zero = (range(in_n), range(pf, in_d + pf), range(pt, in_h + pt),\n range(pl, in_w + pl), range(in_c))\n\n pad_np[np.ix_(*no_zero)] = np_data\n ret_np = np.zeros(shape=out_shape).astype(dtype)\n if pool_type == 'avg':\n for i in range(out_shape[depth_axis]):\n for j in range(out_shape[height_axis]):\n for k in range(out_shape[width_axis]):\n if exclusive:\n pad_exclusive = pad_np.copy()\n pad_exclusive[np.ix_(*no_zero)] = 1\n if data_format == \"NCDHW\":\n pad_count = np.sum(\n pad_exclusive[:, :, i * s_d:i * s_d +\n k_d, j * s_h:j * s_h +\n k_h, k * s_w:k * s_w + k_w] == 1,\n axis=(depth_axis, height_axis, width_axis))\n ret_np[:, :, i, j, k] = np.sum(\n pad_np[:, :, i * s_d:i * s_d + k_d, j * s_h:j *\n s_h + k_h, k * s_w:k * s_w + k_w],\n axis=(depth_axis, height_axis,\n width_axis)) / np.maximum(pad_count, 1)\n else:\n pad_count = np.sum(\n pad_exclusive[:, i * s_d:i * s_d +\n k_d, j * s_h:j * s_h + k_h, k *\n s_w:k * s_w + k_w, :] == 1,\n axis=(depth_axis, height_axis, width_axis))\n ret_np[:, i, j, k, :] = np.sum(\n pad_np[:, i * s_d:i * s_d + k_d, j * s_h:j *\n s_h + k_h, k * s_w:k * s_w + k_w, :],\n axis=(depth_axis, height_axis,\n width_axis)) / np.maximum(pad_count, 1)\n else:\n if data_format == \"NCDHW\":\n ret_np[:, :,i, j, k] = \\\n np.mean(pad_np[:, :,\n i * s_d: i * s_d + k_d,\n j * s_h: j * s_h + k_h,\n k * s_w: k * s_w + k_w], axis=(depth_axis, height_axis, width_axis))\n else:\n ret_np[:, i, j, k, :] = \\\n np.mean(pad_np[:,\n i * s_d: i * s_d + k_d,\n j * s_h: j * s_h + k_h,\n k * s_w: k * s_w + k_w,\n :], axis=(depth_axis, height_axis, width_axis))\n elif pool_type == 'max':\n for i in range(out_shape[depth_axis]):\n for j in range(out_shape[height_axis]):\n for k in range(out_shape[width_axis]):\n if data_format == \"NCDHW\":\n ret_np[:, :, i, j, k] = np.max(\n pad_np[:, :, i * s_d:i * s_d + k_d, j *\n s_h:j * s_h + k_h, k * s_w:k * s_w + k_w],\n axis=(depth_axis, height_axis, width_axis))\n else:\n ret_np[:, i, j, k, :] = np.max(\n pad_np[:, i * s_d:i * s_d + k_d, j * s_h:j * s_h +\n k_h, k * s_w:k * s_w + k_w, :],\n axis=(depth_axis, height_axis, width_axis))\n else:\n raise ValueError(\"pool type {} is not supported\".format(pool_type))\n\n ret_np = np.maximum(ret_np, fill_value)\n return ret_np, [out_shape]\n\n\ndef pool1d(np_data, attrs, dtype=\"float32\"):\n pool_type = \"max\"\n ceil_mode = False\n exclusive = True\n data_format = \"NCW\"\n for key in attrs.attr_store:\n if key == \"kernel_size\":\n kernel_size = attrs.get_attr(\"kernel_size\")\n elif key == \"stride_size\":\n stride_size = attrs.get_attr(\"stride_size\")\n elif key == \"padding_size\":\n padding_size = attrs.get_attr(\"padding_size\")\n elif key == \"pool_type\":\n pool_type = attrs.get_attr(\"pool_type\")\n elif key == \"ceil_mode\":\n ceil_mode = attrs.get_attr(\"ceil_mode\")\n elif key == \"exclusive\":\n exclusive = attrs.get_attr(\"exclusive\")\n elif key == \"data_format\":\n data_format = attrs.get_attr(\"data_format\")\n else:\n raise ValueError(\"attr_store {} is not supported\".format(key))\n\n if data_format == \"NCW\":\n in_n, in_c, in_w = in_shape = np_data.shape\n width_axis = 2\n 
elif data_format == \"NWC\":\n in_n, in_w, in_c = in_shape = np_data.shape\n width_axis = 1\n else:\n raise ValueError(\"data_format {} is not supported\".format(data_format))\n\n if isinstance(kernel_size, int):\n k_w = kernel_size\n else:\n k_w, = kernel_size\n if isinstance(stride_size, int):\n s_w = stride_size\n else:\n s_w, = stride_size\n if isinstance(padding_size, int):\n pl = pr = padding_size\n else:\n pl, pr = padding_size\n\n out_shape = list(in_shape)\n if ceil_mode:\n out_shape[width_axis] = int(\n math.ceil(float(in_shape[width_axis] - k_w + pl + pr) / s_w) + 1)\n else:\n out_shape[width_axis] = int(\n math.floor(float(in_shape[width_axis] - k_w + pl + pr) / s_w) + 1)\n\n fill_value = 0\n if exclusive and pool_type == 'max':\n fill_value = sys.float_info.min\n\n if data_format == \"NCW\":\n pad_np = np.full(\n shape=(in_n, in_c, in_w + pl + pr),\n fill_value=fill_value,\n dtype=dtype)\n no_zero = (range(in_n), range(in_c), range(pl, in_w + pl))\n else:\n pad_np = np.full(\n shape=(in_n, in_w + pl + pr, in_c),\n fill_value=fill_value,\n dtype=dtype)\n no_zero = (range(in_n), range(pl, in_w + pl), range(in_c))\n\n pad_np[np.ix_(*no_zero)] = np_data\n ret_np = np.zeros(shape=out_shape).astype(dtype)\n if pool_type == 'avg':\n for i in range(out_shape[width_axis]):\n if exclusive:\n pad_exclusive = pad_np.copy()\n pad_exclusive[np.ix_(*no_zero)] = 1\n if data_format == \"NCW\":\n pad_count = np.sum(\n pad_exclusive[:, :, i * s_w:i * s_w + k_w] == 1,\n axis=width_axis)\n ret_np[:, :, i] = np.sum(\n pad_np[:, :, i * s_w:i * s_w + k_w],\n axis=width_axis) / np.maximum(pad_count, 1)\n else:\n pad_count = np.sum(\n pad_exclusive[:, i * s_w:i * s_w + k_w, :] == 1,\n axis=width_axis)\n ret_np[:, i, :] = np.sum(\n pad_np[:, i * s_w:i * s_w + k_w, :],\n axis=width_axis) / np.maximum(pad_count, 1)\n else:\n if data_format == \"NCW\":\n ret_np[:, :, i] = \\\n np.mean(pad_np[:, :,\n i * s_w: i * s_w + k_w], axis=width_axis)\n else:\n ret_np[:, i, :] = \\\n np.mean(pad_np[:,\n i * s_w: i * s_w + k_w,\n :], axis=width_axis)\n elif pool_type == 'max':\n for k in range(out_shape[width_axis]):\n if data_format == \"NCW\":\n ret_np[:, :, k] = np.max(\n pad_np[:, :, k * s_w:k * s_w + k_w], axis=width_axis)\n else:\n ret_np[:, k, :] = np.max(\n pad_np[:, k * s_w:k * s_w + k_w, :], axis=width_axis)\n else:\n raise ValueError(\"pool type {} is not supported\".format(pool_type))\n\n ret_np = np.maximum(ret_np, fill_value)\n return ret_np, [out_shape]\n" ]
[ [ "numpy.ix_", "numpy.sum", "numpy.zeros", "numpy.max", "numpy.maximum", "numpy.full", "numpy.mean" ] ]
PyDemic/pydemic
[ "7e748e4bbe5c1f7fb209271af0ff8afb8fbd4fd5" ]
[ "tests/models/test_sir.py" ]
[ "import numpy as np\n\nfrom pydemic.diseases import covid19\nfrom pydemic.models import eSIR\n\n\nclass TestSIR:\n def test_basic_esir_api(self):\n m = eSIR(disease=covid19)\n m.run(30)\n res = m[\"I\"]\n ok = m.data.loc[m.times[0], \"infectious\"] * np.exp(m.K * m.times)\n\n assert m.R0 == 2.74\n assert abs(m.K - m.gamma * 1.74) <= 1e-6\n assert m.iter == len(m.data) == len(m.times) == len(m.dates)\n assert np.abs(res / ok - 1).max() < 1e-4\n" ]
[ [ "numpy.abs", "numpy.exp" ] ]
dorothykiz1/pandas
[ "6033ed4b3383d874ee4a8a461724c0b8c2ca968d" ]
[ "pandas/io/excel/_xlsxwriter.py" ]
[ "from __future__ import annotations\n\nfrom typing import Any\n\nimport pandas._libs.json as json\nfrom pandas._typing import (\n FilePath,\n StorageOptions,\n WriteExcelBuffer,\n)\n\nfrom pandas.io.excel._base import ExcelWriter\nfrom pandas.io.excel._util import (\n combine_kwargs,\n validate_freeze_panes,\n)\n\n\nclass _XlsxStyler:\n # Map from openpyxl-oriented styles to flatter xlsxwriter representation\n # Ordering necessary for both determinism and because some are keyed by\n # prefixes of others.\n STYLE_MAPPING: dict[str, list[tuple[tuple[str, ...], str]]] = {\n \"font\": [\n ((\"name\",), \"font_name\"),\n ((\"sz\",), \"font_size\"),\n ((\"size\",), \"font_size\"),\n ((\"color\", \"rgb\"), \"font_color\"),\n ((\"color\",), \"font_color\"),\n ((\"b\",), \"bold\"),\n ((\"bold\",), \"bold\"),\n ((\"i\",), \"italic\"),\n ((\"italic\",), \"italic\"),\n ((\"u\",), \"underline\"),\n ((\"underline\",), \"underline\"),\n ((\"strike\",), \"font_strikeout\"),\n ((\"vertAlign\",), \"font_script\"),\n ((\"vertalign\",), \"font_script\"),\n ],\n \"number_format\": [((\"format_code\",), \"num_format\"), ((), \"num_format\")],\n \"protection\": [((\"locked\",), \"locked\"), ((\"hidden\",), \"hidden\")],\n \"alignment\": [\n ((\"horizontal\",), \"align\"),\n ((\"vertical\",), \"valign\"),\n ((\"text_rotation\",), \"rotation\"),\n ((\"wrap_text\",), \"text_wrap\"),\n ((\"indent\",), \"indent\"),\n ((\"shrink_to_fit\",), \"shrink\"),\n ],\n \"fill\": [\n ((\"patternType\",), \"pattern\"),\n ((\"patterntype\",), \"pattern\"),\n ((\"fill_type\",), \"pattern\"),\n ((\"start_color\", \"rgb\"), \"fg_color\"),\n ((\"fgColor\", \"rgb\"), \"fg_color\"),\n ((\"fgcolor\", \"rgb\"), \"fg_color\"),\n ((\"start_color\",), \"fg_color\"),\n ((\"fgColor\",), \"fg_color\"),\n ((\"fgcolor\",), \"fg_color\"),\n ((\"end_color\", \"rgb\"), \"bg_color\"),\n ((\"bgColor\", \"rgb\"), \"bg_color\"),\n ((\"bgcolor\", \"rgb\"), \"bg_color\"),\n ((\"end_color\",), \"bg_color\"),\n ((\"bgColor\",), \"bg_color\"),\n ((\"bgcolor\",), \"bg_color\"),\n ],\n \"border\": [\n ((\"color\", \"rgb\"), \"border_color\"),\n ((\"color\",), \"border_color\"),\n ((\"style\",), \"border\"),\n ((\"top\", \"color\", \"rgb\"), \"top_color\"),\n ((\"top\", \"color\"), \"top_color\"),\n ((\"top\", \"style\"), \"top\"),\n ((\"top\",), \"top\"),\n ((\"right\", \"color\", \"rgb\"), \"right_color\"),\n ((\"right\", \"color\"), \"right_color\"),\n ((\"right\", \"style\"), \"right\"),\n ((\"right\",), \"right\"),\n ((\"bottom\", \"color\", \"rgb\"), \"bottom_color\"),\n ((\"bottom\", \"color\"), \"bottom_color\"),\n ((\"bottom\", \"style\"), \"bottom\"),\n ((\"bottom\",), \"bottom\"),\n ((\"left\", \"color\", \"rgb\"), \"left_color\"),\n ((\"left\", \"color\"), \"left_color\"),\n ((\"left\", \"style\"), \"left\"),\n ((\"left\",), \"left\"),\n ],\n }\n\n @classmethod\n def convert(cls, style_dict, num_format_str=None):\n \"\"\"\n converts a style_dict to an xlsxwriter format dict\n\n Parameters\n ----------\n style_dict : style dictionary to convert\n num_format_str : optional number format string\n \"\"\"\n # Create a XlsxWriter format object.\n props = {}\n\n if num_format_str is not None:\n props[\"num_format\"] = num_format_str\n\n if style_dict is None:\n return props\n\n if \"borders\" in style_dict:\n style_dict = style_dict.copy()\n style_dict[\"border\"] = style_dict.pop(\"borders\")\n\n for style_group_key, style_group in style_dict.items():\n for src, dst in cls.STYLE_MAPPING.get(style_group_key, []):\n # src is a sequence of keys into a 
nested dict\n # dst is a flat key\n if dst in props:\n continue\n v = style_group\n for k in src:\n try:\n v = v[k]\n except (KeyError, TypeError):\n break\n else:\n props[dst] = v\n\n if isinstance(props.get(\"pattern\"), str):\n # TODO: support other fill patterns\n props[\"pattern\"] = 0 if props[\"pattern\"] == \"none\" else 1\n\n for k in [\"border\", \"top\", \"right\", \"bottom\", \"left\"]:\n if isinstance(props.get(k), str):\n try:\n props[k] = [\n \"none\",\n \"thin\",\n \"medium\",\n \"dashed\",\n \"dotted\",\n \"thick\",\n \"double\",\n \"hair\",\n \"mediumDashed\",\n \"dashDot\",\n \"mediumDashDot\",\n \"dashDotDot\",\n \"mediumDashDotDot\",\n \"slantDashDot\",\n ].index(props[k])\n except ValueError:\n props[k] = 2\n\n if isinstance(props.get(\"font_script\"), str):\n props[\"font_script\"] = [\"baseline\", \"superscript\", \"subscript\"].index(\n props[\"font_script\"]\n )\n\n if isinstance(props.get(\"underline\"), str):\n props[\"underline\"] = {\n \"none\": 0,\n \"single\": 1,\n \"double\": 2,\n \"singleAccounting\": 33,\n \"doubleAccounting\": 34,\n }[props[\"underline\"]]\n\n return props\n\n\nclass XlsxWriter(ExcelWriter):\n engine = \"xlsxwriter\"\n supported_extensions = (\".xlsx\",)\n\n def __init__(\n self,\n path: FilePath | WriteExcelBuffer | ExcelWriter,\n engine: str | None = None,\n date_format: str | None = None,\n datetime_format: str | None = None,\n mode: str = \"w\",\n storage_options: StorageOptions = None,\n if_sheet_exists: str | None = None,\n engine_kwargs: dict[str, Any] | None = None,\n **kwargs,\n ) -> None:\n # Use the xlsxwriter module as the Excel writer.\n from xlsxwriter import Workbook\n\n engine_kwargs = combine_kwargs(engine_kwargs, kwargs)\n\n if mode == \"a\":\n raise ValueError(\"Append mode is not supported with xlsxwriter!\")\n\n super().__init__(\n path,\n engine=engine,\n date_format=date_format,\n datetime_format=datetime_format,\n mode=mode,\n storage_options=storage_options,\n if_sheet_exists=if_sheet_exists,\n engine_kwargs=engine_kwargs,\n )\n\n self._book = Workbook(self._handles.handle, **engine_kwargs)\n\n @property\n def book(self):\n \"\"\"\n Book instance of class xlsxwriter.Workbook.\n\n This attribute can be used to access engine-specific features.\n \"\"\"\n return self._book\n\n @property\n def sheets(self) -> dict[str, Any]:\n result = self.book.sheetnames\n return result\n\n def _save(self) -> None:\n \"\"\"\n Save workbook to disk.\n \"\"\"\n self.book.close()\n\n def _write_cells(\n self,\n cells,\n sheet_name: str | None = None,\n startrow: int = 0,\n startcol: int = 0,\n freeze_panes: tuple[int, int] | None = None,\n ) -> None:\n # Write the frame cells using xlsxwriter.\n sheet_name = self._get_sheet_name(sheet_name)\n\n wks = self.book.get_worksheet_by_name(sheet_name)\n if wks is None:\n wks = self.book.add_worksheet(sheet_name)\n\n style_dict = {\"null\": None}\n\n if validate_freeze_panes(freeze_panes):\n wks.freeze_panes(*(freeze_panes))\n\n for cell in cells:\n val, fmt = self._value_with_fmt(cell.val)\n\n stylekey = json.dumps(cell.style)\n if fmt:\n stylekey += fmt\n\n if stylekey in style_dict:\n style = style_dict[stylekey]\n else:\n style = self.book.add_format(_XlsxStyler.convert(cell.style, fmt))\n style_dict[stylekey] = style\n\n if cell.mergestart is not None and cell.mergeend is not None:\n wks.merge_range(\n startrow + cell.row,\n startcol + cell.col,\n startrow + cell.mergestart,\n startcol + cell.mergeend,\n val,\n style,\n )\n else:\n wks.write(startrow + cell.row, startcol + cell.col, val, 
style)\n" ]
[ [ "pandas.io.excel._util.combine_kwargs", "pandas.io.excel._util.validate_freeze_panes", "pandas._libs.json.dumps" ] ]
kali20gakki/code
[ "369a2e64c2bfbd18899d1d49556d8d208d01bdff" ]
[ "ppdet/data/transform/operators.py" ]
[ "# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# function:\n# operators to process sample,\n# eg: decode/resize/crop image\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\ntry:\n from collections.abc import Sequence\nexcept Exception:\n from collections import Sequence\n\nfrom numbers import Number\n\nimport uuid\nimport logging\nimport random\nimport math\nimport numpy as np\nimport os\nimport six\n\nimport cv2\nfrom PIL import Image, ImageEnhance, ImageDraw, ImageOps\nimport imgaug as ia\nimport imgaug.augmenters as iaa\nfrom .minority_enhance_utils import *\n\nfrom ppdet.core.workspace import serializable\nfrom ppdet.modeling.ops import AnchorGrid\n\nfrom .op_helper import (satisfy_sample_constraint, filter_and_process,\n generate_sample_bbox, clip_bbox, data_anchor_sampling,\n satisfy_sample_constraint_coverage, crop_image_sampling,\n generate_sample_bbox_square, bbox_area_sampling,\n is_poly, gaussian_radius, draw_gaussian)\n\nlogger = logging.getLogger(__name__)\n\nregistered_ops = []\n\n\ndef register_op(cls):\n registered_ops.append(cls.__name__)\n if not hasattr(BaseOperator, cls.__name__):\n setattr(BaseOperator, cls.__name__, cls)\n else:\n raise KeyError(\"The {} class has been registered.\".format(cls.__name__))\n return serializable(cls)\n\n\nclass BboxError(ValueError):\n pass\n\n\nclass ImageError(ValueError):\n pass\n\n\nclass BaseOperator(object):\n def __init__(self, name=None):\n if name is None:\n name = self.__class__.__name__\n self._id = name + '_' + str(uuid.uuid4())[-6:]\n\n def __call__(self, sample, context=None):\n \"\"\" Process a sample.\n Args:\n sample (dict): a dict of sample, eg: {'image':xx, 'label': xxx}\n context (dict): info about this sample processing\n Returns:\n result (dict): a processed sample\n \"\"\"\n return sample\n\n def __str__(self):\n return str(self._id)\n\n\n@register_op\nclass DecodeImage(BaseOperator):\n def __init__(self, to_rgb=True, with_mixup=False, with_cutmix=False):\n \"\"\" Transform the image data to numpy format.\n Args:\n to_rgb (bool): whether to convert BGR to RGB\n with_mixup (bool): whether or not to mixup image and gt_bbbox/gt_score\n with_cutmix (bool): whether or not to cutmix image and gt_bbbox/gt_score\n \"\"\"\n\n super(DecodeImage, self).__init__()\n self.to_rgb = to_rgb\n self.with_mixup = with_mixup\n self.with_cutmix = with_cutmix\n if not isinstance(self.to_rgb, bool):\n raise TypeError(\"{}: input type is invalid.\".format(self))\n if not isinstance(self.with_mixup, bool):\n raise TypeError(\"{}: input type is invalid.\".format(self))\n\n def __call__(self, sample, context=None):\n \"\"\" load image if 'im_file' field is not empty but 'image' is\"\"\"\n if 'image' not in sample:\n with open(sample['im_file'], 'rb') as f:\n sample['image'] = f.read()\n\n im = sample['image']\n data = np.frombuffer(im, dtype='uint8')\n im = cv2.imdecode(data, 1) # BGR mode, but need RGB 
mode\n\n if self.to_rgb:\n im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n sample['image'] = im\n\n if 'h' not in sample:\n sample['h'] = im.shape[0]\n elif sample['h'] != im.shape[0]:\n logger.warn(\n \"The actual image height: {} is not equal to the \"\n \"height: {} in annotation, and update sample['h'] by actual \"\n \"image height.\".format(im.shape[0], sample['h']))\n sample['h'] = im.shape[0]\n if 'w' not in sample:\n sample['w'] = im.shape[1]\n elif sample['w'] != im.shape[1]:\n logger.warn(\n \"The actual image width: {} is not equal to the \"\n \"width: {} in annotation, and update sample['w'] by actual \"\n \"image width.\".format(im.shape[1], sample['w']))\n sample['w'] = im.shape[1]\n\n # make default im_info with [h, w, 1]\n sample['im_info'] = np.array(\n [im.shape[0], im.shape[1], 1.], dtype=np.float32)\n\n # decode mixup image\n if self.with_mixup and 'mixup' in sample:\n self.__call__(sample['mixup'], context)\n\n # decode cutmix image\n if self.with_cutmix and 'cutmix' in sample:\n self.__call__(sample['cutmix'], context)\n\n # decode semantic label \n if 'semantic' in sample.keys() and sample['semantic'] is not None:\n sem_file = sample['semantic']\n sem = cv2.imread(sem_file, cv2.IMREAD_GRAYSCALE)\n sample['semantic'] = sem.astype('int32')\n\n return sample\n\n@register_op\nclass RoadEnhance(BaseOperator):\n def __init__(self, prob=0.5):\n super(RoadEnhance, self).__init__()\n print(\"[DIY] Use RoadEnhance!\")\n sometimes = lambda aug: iaa.Sometimes(prob, aug)\n seq_list = [sometimes(\n iaa.OneOf([\n iaa.GammaContrast((0.5, 2.0)),\n iaa.MultiplyAndAddToBrightness(mul=(0.5, 1.5), add=(-30, 30)),\n iaa.MotionBlur(k=7, angle=[-45, 45]),\n iaa.Snowflakes(flake_size=(0.5, 0.75), speed=(0.001, 0.03)),\n iaa.Rain(drop_size=(0.10, 0.20)),\n ]\n )\n )]\n self.seq = iaa.Sequential(seq_list, random_order=True)\n\n\n def apply(self, sample, context=None):\n im = sample['image']\n im = self.seq(image=im)\n sample['image'] = im\n return sample\n\n@register_op\nclass MinorityEnhance(BaseOperator):\n def __init__(self, obj_dir='ppdet/data/transform/class3_obj', prob=0.6):\n super(MinorityEnhance, self).__init__()\n print(\"[DIY] Use MinorityEnhance!\")\n\n self.prob = int(prob * 10)\n # 少数类的资源文件\n self.images_dir = os.path.join(obj_dir, 'images')\n self.seg_dir = os.path.join(obj_dir, 'seg')\n self.segs_list = os.listdir(self.seg_dir)\n\n def _apply(self, sample, context=None):\n if random.randint(1,10) < 7: # 0.7\n bbox_size_range = (32, 64) # 中等大小\n else:\n bbox_size_range = (16, 32)\n \n gt_bbox = sample['gt_bbox']\n gt_class = sample['gt_class']\n H, W = sample['h'], sample['w']\n bg_img = sample['image']\n\n points = select_points(gt_bbox, [H, W], 2) # 固定两张图\n for point in points:\n seg_file = random.choice(self.segs_list)\n seg_path = os.path.join(seg_dir, seg_file)\n obj_path = os.path.join(self.images_dir, seg_file.split('.')[0]+'.png')\n polygon = get_polygon(seg_path)\n obj_img = cv2.imread(obj_path)\n\n bg_img, bbox = paste_obj2img(bg_img, obj_img, polygon, point, \n random.randint(bbox_size_range[0], bbox_size_range[1]))\n \n if box is not None:\n gt_bbox.append(box)\n gt_class.append(3)\n sample['image'] = bg_img\n \n return sample\n\n def apply(self, sample, context=None):\n # if random.randint(1,10) < 6: # 0.6\n # try:\n # sample = self._apply(sample)\n # except:\n # sample = sample\n # return sample\n\n if random.randint(1,10) < 6: # 0.6\n sample = self._apply(sample)\n return sample\n\n@register_op\nclass MultiscaleTestResize(BaseOperator):\n def __init__(self,\n 
origin_target_size=800,\n origin_max_size=1333,\n target_size=[],\n max_size=2000,\n interp=cv2.INTER_LINEAR,\n use_flip=True):\n \"\"\"\n Rescale image to the each size in target size, and capped at max_size.\n Args:\n origin_target_size(int): original target size of image's short side.\n origin_max_size(int): original max size of image.\n target_size (list): A list of target sizes of image's short side.\n max_size (int): the max size of image.\n interp (int): the interpolation method.\n use_flip (bool): whether use flip augmentation.\n \"\"\"\n super(MultiscaleTestResize, self).__init__()\n self.origin_target_size = int(origin_target_size)\n self.origin_max_size = int(origin_max_size)\n self.max_size = int(max_size)\n self.interp = int(interp)\n self.use_flip = use_flip\n\n if not isinstance(target_size, list):\n raise TypeError(\n \"Type of target_size is invalid. Must be List, now is {}\".\n format(type(target_size)))\n self.target_size = target_size\n if not (isinstance(self.origin_target_size, int) and isinstance(\n self.origin_max_size, int) and isinstance(self.max_size, int)\n and isinstance(self.interp, int)):\n raise TypeError(\"{}: input type is invalid.\".format(self))\n\n def __call__(self, sample, context=None):\n \"\"\" Resize the image numpy for multi-scale test.\n \"\"\"\n origin_ims = {}\n im = sample['image']\n if not isinstance(im, np.ndarray):\n raise TypeError(\"{}: image type is not numpy.\".format(self))\n if len(im.shape) != 3:\n raise ImageError('{}: image is not 3-dimensional.'.format(self))\n im_shape = im.shape\n im_size_min = np.min(im_shape[0:2])\n im_size_max = np.max(im_shape[0:2])\n if float(im_size_min) == 0:\n raise ZeroDivisionError('{}: min size of image is 0'.format(self))\n base_name_list = ['image']\n origin_ims['image'] = im\n if self.use_flip:\n sample['image_flip'] = im[:, ::-1, :]\n base_name_list.append('image_flip')\n origin_ims['image_flip'] = sample['image_flip']\n\n for base_name in base_name_list:\n im_scale = float(self.origin_target_size) / float(im_size_min)\n # Prevent the biggest axis from being more than max_size\n if np.round(im_scale * im_size_max) > self.origin_max_size:\n im_scale = float(self.origin_max_size) / float(im_size_max)\n im_scale_x = im_scale\n im_scale_y = im_scale\n\n resize_w = np.round(im_scale_x * float(im_shape[1]))\n resize_h = np.round(im_scale_y * float(im_shape[0]))\n im_resize = cv2.resize(\n origin_ims[base_name],\n None,\n None,\n fx=im_scale_x,\n fy=im_scale_y,\n interpolation=self.interp)\n\n sample[base_name] = im_resize\n info_name = 'im_info' if base_name == 'image' else 'im_info_image_flip'\n sample[base_name] = im_resize\n sample[info_name] = np.array(\n [resize_h, resize_w, im_scale], dtype=np.float32)\n for i, size in enumerate(self.target_size):\n im_scale = float(size) / float(im_size_min)\n if np.round(im_scale * im_size_max) > self.max_size:\n im_scale = float(self.max_size) / float(im_size_max)\n im_scale_x = im_scale\n im_scale_y = im_scale\n resize_w = np.round(im_scale_x * float(im_shape[1]))\n resize_h = np.round(im_scale_y * float(im_shape[0]))\n im_resize = cv2.resize(\n origin_ims[base_name],\n None,\n None,\n fx=im_scale_x,\n fy=im_scale_y,\n interpolation=self.interp)\n\n im_info = [resize_h, resize_w, im_scale]\n # hard-code here, must be consistent with\n # ppdet/modeling/architectures/input_helper.py\n name = base_name + '_scale_' + str(i)\n info_name = 'im_info_' + name\n sample[name] = im_resize\n sample[info_name] = np.array(\n [resize_h, resize_w, im_scale], 
dtype=np.float32)\n return sample\n\n\n@register_op\nclass ResizeImage(BaseOperator):\n def __init__(self,\n target_size=0,\n max_size=0,\n interp=cv2.INTER_LINEAR,\n use_cv2=True,\n resize_box=False):\n \"\"\"\n Rescale image to the specified target size, and capped at max_size\n if max_size != 0.\n If target_size is list, selected a scale randomly as the specified\n target size.\n Args:\n target_size (int|list): the target size of image's short side,\n multi-scale training is adopted when type is list.\n max_size (int): the max size of image\n interp (int): the interpolation method\n use_cv2 (bool): use the cv2 interpolation method or use PIL\n interpolation method\n resize_box (bool): whether resize ground truth bbox annotations.\n \"\"\"\n super(ResizeImage, self).__init__()\n self.max_size = int(max_size)\n self.interp = int(interp)\n self.use_cv2 = use_cv2\n self.resize_box = resize_box\n if not (isinstance(target_size, int) or isinstance(target_size, list)):\n raise TypeError(\n \"Type of target_size is invalid. Must be Integer or List, now is {}\".\n format(type(target_size)))\n self.target_size = target_size\n if not (isinstance(self.max_size, int) and isinstance(self.interp,\n int)):\n raise TypeError(\"{}: input type is invalid.\".format(self))\n\n def __call__(self, sample, context=None):\n \"\"\" Resize the image numpy.\n \"\"\"\n im = sample['image']\n if not isinstance(im, np.ndarray):\n raise TypeError(\"{}: image type is not numpy.\".format(self))\n if len(im.shape) != 3:\n raise ImageError('{}: image is not 3-dimensional.'.format(self))\n im_shape = im.shape\n im_size_min = np.min(im_shape[0:2])\n im_size_max = np.max(im_shape[0:2])\n if isinstance(self.target_size, list):\n # Case for multi-scale training\n selected_size = random.choice(self.target_size)\n else:\n selected_size = self.target_size\n if float(im_size_min) == 0:\n raise ZeroDivisionError('{}: min size of image is 0'.format(self))\n if self.max_size != 0:\n im_scale = float(selected_size) / float(im_size_min)\n # Prevent the biggest axis from being more than max_size\n if np.round(im_scale * im_size_max) > self.max_size:\n im_scale = float(self.max_size) / float(im_size_max)\n im_scale_x = im_scale\n im_scale_y = im_scale\n\n resize_w = im_scale_x * float(im_shape[1])\n resize_h = im_scale_y * float(im_shape[0])\n im_info = [resize_h, resize_w, im_scale]\n if 'im_info' in sample and sample['im_info'][2] != 1.:\n sample['im_info'] = np.append(\n list(sample['im_info']), im_info).astype(np.float32)\n else:\n sample['im_info'] = np.array(im_info).astype(np.float32)\n else:\n im_scale_x = float(selected_size) / float(im_shape[1])\n im_scale_y = float(selected_size) / float(im_shape[0])\n\n resize_w = selected_size\n resize_h = selected_size\n\n if self.use_cv2:\n im = cv2.resize(\n im,\n None,\n None,\n fx=im_scale_x,\n fy=im_scale_y,\n interpolation=self.interp)\n else:\n if self.max_size != 0:\n raise TypeError(\n 'If you set max_size to cap the maximum size of image,'\n 'please set use_cv2 to True to resize the image.')\n im = im.astype('uint8')\n im = Image.fromarray(im)\n im = im.resize((int(resize_w), int(resize_h)), self.interp)\n im = np.array(im)\n sample['image'] = im\n sample['scale_factor'] = [im_scale_x, im_scale_y] * 2\n if 'gt_bbox' in sample and self.resize_box and len(sample[\n 'gt_bbox']) > 0:\n bboxes = sample['gt_bbox'] * sample['scale_factor']\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, resize_w - 1)\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, resize_h - 1)\n sample['gt_bbox'] = 
bboxes\n if 'semantic' in sample.keys() and sample['semantic'] is not None:\n semantic = sample['semantic']\n semantic = cv2.resize(\n semantic.astype('float32'),\n None,\n None,\n fx=im_scale_x,\n fy=im_scale_y,\n interpolation=self.interp)\n semantic = np.asarray(semantic).astype('int32')\n semantic = np.expand_dims(semantic, 0)\n sample['semantic'] = semantic\n if 'gt_segm' in sample and len(sample['gt_segm']) > 0:\n masks = [\n cv2.resize(\n gt_segm,\n None,\n None,\n fx=im_scale_x,\n fy=im_scale_y,\n interpolation=cv2.INTER_NEAREST)\n for gt_segm in sample['gt_segm']\n ]\n sample['gt_segm'] = np.asarray(masks).astype(np.uint8)\n\n return sample\n\n\n@register_op\nclass RandomFlipImage(BaseOperator):\n def __init__(self, prob=0.5, is_normalized=False, is_mask_flip=False):\n \"\"\"\n Args:\n prob (float): the probability of flipping image\n is_normalized (bool): whether the bbox scale to [0,1]\n is_mask_flip (bool): whether flip the segmentation\n \"\"\"\n super(RandomFlipImage, self).__init__()\n self.prob = prob\n self.is_normalized = is_normalized\n self.is_mask_flip = is_mask_flip\n if not (isinstance(self.prob, float) and\n isinstance(self.is_normalized, bool) and\n isinstance(self.is_mask_flip, bool)):\n raise TypeError(\"{}: input type is invalid.\".format(self))\n\n def flip_segms(self, segms, height, width):\n def _flip_poly(poly, width):\n flipped_poly = np.array(poly)\n flipped_poly[0::2] = width - np.array(poly[0::2]) - 1\n return flipped_poly.tolist()\n\n def _flip_rle(rle, height, width):\n if 'counts' in rle and type(rle['counts']) == list:\n rle = mask_util.frPyObjects(rle, height, width)\n mask = mask_util.decode(rle)\n mask = mask[:, ::-1]\n rle = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))\n return rle\n\n flipped_segms = []\n for segm in segms:\n if is_poly(segm):\n # Polygon format\n flipped_segms.append([_flip_poly(poly, width) for poly in segm])\n else:\n # RLE format\n import pycocotools.mask as mask_util\n flipped_segms.append(_flip_rle(segm, height, width))\n return flipped_segms\n\n def flip_keypoint(self, gt_keypoint, width):\n for i in range(gt_keypoint.shape[1]):\n if i % 2 == 0:\n old_x = gt_keypoint[:, i].copy()\n if self.is_normalized:\n gt_keypoint[:, i] = 1 - old_x\n else:\n gt_keypoint[:, i] = width - old_x - 1\n return gt_keypoint\n\n def __call__(self, sample, context=None):\n \"\"\"Filp the image and bounding box.\n Operators:\n 1. Flip the image numpy.\n 2. Transform the bboxes' x coordinates.\n (Must judge whether the coordinates are normalized!)\n 3. 
Transform the segmentations' x coordinates.\n (Must judge whether the coordinates are normalized!)\n Output:\n sample: the image, bounding box and segmentation part\n in sample are flipped.\n \"\"\"\n\n samples = sample\n batch_input = True\n if not isinstance(samples, Sequence):\n batch_input = False\n samples = [samples]\n for sample in samples:\n gt_bbox = sample['gt_bbox']\n im = sample['image']\n if not isinstance(im, np.ndarray):\n raise TypeError(\"{}: image is not a numpy array.\".format(self))\n if len(im.shape) != 3:\n raise ImageError(\"{}: image is not 3-dimensional.\".format(self))\n height, width, _ = im.shape\n if np.random.uniform(0, 1) < self.prob:\n im = im[:, ::-1, :]\n if gt_bbox.shape[0] == 0:\n return sample\n oldx1 = gt_bbox[:, 0].copy()\n oldx2 = gt_bbox[:, 2].copy()\n if self.is_normalized:\n gt_bbox[:, 0] = 1 - oldx2\n gt_bbox[:, 2] = 1 - oldx1\n else:\n gt_bbox[:, 0] = width - oldx2 - 1\n gt_bbox[:, 2] = width - oldx1 - 1\n if gt_bbox.shape[0] != 0 and (\n gt_bbox[:, 2] < gt_bbox[:, 0]).all():\n m = \"{}: invalid box, x2 should be greater than x1\".format(\n self)\n raise BboxError(m)\n sample['gt_bbox'] = gt_bbox\n if self.is_mask_flip and len(sample['gt_poly']) != 0:\n sample['gt_poly'] = self.flip_segms(sample['gt_poly'],\n height, width)\n if 'gt_keypoint' in sample.keys():\n sample['gt_keypoint'] = self.flip_keypoint(\n sample['gt_keypoint'], width)\n\n if 'semantic' in sample.keys() and sample[\n 'semantic'] is not None:\n sample['semantic'] = sample['semantic'][:, ::-1]\n\n if 'gt_segm' in sample.keys() and sample['gt_segm'] is not None:\n sample['gt_segm'] = sample['gt_segm'][:, :, ::-1]\n\n sample['flipped'] = True\n sample['image'] = im\n sample = samples if batch_input else samples[0]\n return sample\n\n\n@register_op\nclass RandomErasingImage(BaseOperator):\n def __init__(self, prob=0.5, sl=0.02, sh=0.4, r1=0.3):\n \"\"\"\n Random Erasing Data Augmentation, see https://arxiv.org/abs/1708.04896\n Args:\n prob (float): probability to carry out random erasing\n sl (float): lower limit of the erasing area ratio\n sh (float): upper limit of the erasing area ratio\n r1 (float): aspect ratio of the erasing region\n \"\"\"\n super(RandomErasingImage, self).__init__()\n self.prob = prob\n self.sl = sl\n self.sh = sh\n self.r1 = r1\n\n def __call__(self, sample, context=None):\n samples = sample\n batch_input = True\n if not isinstance(samples, Sequence):\n batch_input = False\n samples = [samples]\n for sample in samples:\n gt_bbox = sample['gt_bbox']\n im = sample['image']\n if not isinstance(im, np.ndarray):\n raise TypeError(\"{}: image is not a numpy array.\".format(self))\n if len(im.shape) != 3:\n raise ImageError(\"{}: image is not 3-dimensional.\".format(self))\n\n for idx in range(gt_bbox.shape[0]):\n if self.prob <= np.random.rand():\n continue\n\n x1, y1, x2, y2 = gt_bbox[idx, :]\n w_bbox = x2 - x1 + 1\n h_bbox = y2 - y1 + 1\n area = w_bbox * h_bbox\n\n target_area = random.uniform(self.sl, self.sh) * area\n aspect_ratio = random.uniform(self.r1, 1 / self.r1)\n\n h = int(round(math.sqrt(target_area * aspect_ratio)))\n w = int(round(math.sqrt(target_area / aspect_ratio)))\n\n if w < w_bbox and h < h_bbox:\n off_y1 = random.randint(0, int(h_bbox - h))\n off_x1 = random.randint(0, int(w_bbox - w))\n im[int(y1 + off_y1):int(y1 + off_y1 + h), int(x1 + off_x1):\n int(x1 + off_x1 + w), :] = 0\n sample['image'] = im\n\n sample = samples if batch_input else samples[0]\n return sample\n\n\n@register_op\nclass GridMaskOp(BaseOperator):\n def __init__(self,\n 
use_h=True,\n use_w=True,\n rotate=1,\n offset=False,\n ratio=0.5,\n mode=1,\n prob=0.7,\n upper_iter=360000):\n \"\"\"\n GridMask Data Augmentation, see https://arxiv.org/abs/2001.04086\n Args:\n use_h (bool): whether to mask vertically\n use_w (boo;): whether to mask horizontally\n rotate (float): angle for the mask to rotate\n offset (float): mask offset\n ratio (float): mask ratio\n mode (int): gridmask mode\n prob (float): max probability to carry out gridmask\n upper_iter (int): suggested to be equal to global max_iter\n \"\"\"\n super(GridMaskOp, self).__init__()\n self.use_h = use_h\n self.use_w = use_w\n self.rotate = rotate\n self.offset = offset\n self.ratio = ratio\n self.mode = mode\n self.prob = prob\n self.upper_iter = upper_iter\n\n from .gridmask_utils import GridMask\n self.gridmask_op = GridMask(\n use_h,\n use_w,\n rotate=rotate,\n offset=offset,\n ratio=ratio,\n mode=mode,\n prob=prob,\n upper_iter=upper_iter)\n\n def __call__(self, sample, context=None):\n samples = sample\n batch_input = True\n if not isinstance(samples, Sequence):\n batch_input = False\n samples = [samples]\n for sample in samples:\n sample['image'] = self.gridmask_op(sample['image'],\n sample['curr_iter'])\n if not batch_input:\n samples = samples[0]\n return samples\n\n\n@register_op\nclass AutoAugmentImage(BaseOperator):\n def __init__(self, is_normalized=False, autoaug_type=\"v1\"):\n \"\"\"\n Args:\n is_normalized (bool): whether the bbox scale to [0,1]\n autoaug_type (str): autoaug type, support v0, v1, v2, v3, test\n \"\"\"\n super(AutoAugmentImage, self).__init__()\n self.is_normalized = is_normalized\n self.autoaug_type = autoaug_type\n if not isinstance(self.is_normalized, bool):\n raise TypeError(\"{}: input type is invalid.\".format(self))\n\n def __call__(self, sample, context=None):\n \"\"\"\n Learning Data Augmentation Strategies for Object Detection, see https://arxiv.org/abs/1906.11172\n \"\"\"\n samples = sample\n batch_input = True\n if not isinstance(samples, Sequence):\n batch_input = False\n samples = [samples]\n for sample in samples:\n gt_bbox = sample['gt_bbox']\n im = sample['image']\n if not isinstance(im, np.ndarray):\n raise TypeError(\"{}: image is not a numpy array.\".format(self))\n if len(im.shape) != 3:\n raise ImageError(\"{}: image is not 3-dimensional.\".format(self))\n if len(gt_bbox) == 0:\n continue\n\n # gt_boxes : [x1, y1, x2, y2]\n # norm_gt_boxes: [y1, x1, y2, x2]\n height, width, _ = im.shape\n norm_gt_bbox = np.ones_like(gt_bbox, dtype=np.float32)\n if not self.is_normalized:\n norm_gt_bbox[:, 0] = gt_bbox[:, 1] / float(height)\n norm_gt_bbox[:, 1] = gt_bbox[:, 0] / float(width)\n norm_gt_bbox[:, 2] = gt_bbox[:, 3] / float(height)\n norm_gt_bbox[:, 3] = gt_bbox[:, 2] / float(width)\n else:\n norm_gt_bbox[:, 0] = gt_bbox[:, 1]\n norm_gt_bbox[:, 1] = gt_bbox[:, 0]\n norm_gt_bbox[:, 2] = gt_bbox[:, 3]\n norm_gt_bbox[:, 3] = gt_bbox[:, 2]\n\n from .autoaugment_utils import distort_image_with_autoaugment\n im, norm_gt_bbox = distort_image_with_autoaugment(im, norm_gt_bbox,\n self.autoaug_type)\n if not self.is_normalized:\n gt_bbox[:, 0] = norm_gt_bbox[:, 1] * float(width)\n gt_bbox[:, 1] = norm_gt_bbox[:, 0] * float(height)\n gt_bbox[:, 2] = norm_gt_bbox[:, 3] * float(width)\n gt_bbox[:, 3] = norm_gt_bbox[:, 2] * float(height)\n else:\n gt_bbox[:, 0] = norm_gt_bbox[:, 1]\n gt_bbox[:, 1] = norm_gt_bbox[:, 0]\n gt_bbox[:, 2] = norm_gt_bbox[:, 3]\n gt_bbox[:, 3] = norm_gt_bbox[:, 2]\n\n sample['gt_bbox'] = gt_bbox\n sample['image'] = im\n\n sample = samples if 
batch_input else samples[0]\n return sample\n\n\n@register_op\nclass NormalizeImage(BaseOperator):\n def __init__(self,\n mean=[0.485, 0.456, 0.406],\n std=[1, 1, 1],\n is_scale=True,\n is_channel_first=True):\n \"\"\"\n Args:\n mean (list): the pixel mean\n std (list): the pixel variance\n \"\"\"\n super(NormalizeImage, self).__init__()\n self.mean = mean\n self.std = std\n self.is_scale = is_scale\n self.is_channel_first = is_channel_first\n if not (isinstance(self.mean, list) and isinstance(self.std, list) and\n isinstance(self.is_scale, bool)):\n raise TypeError(\"{}: input type is invalid.\".format(self))\n from functools import reduce\n if reduce(lambda x, y: x * y, self.std) == 0:\n raise ValueError('{}: std is invalid!'.format(self))\n\n def __call__(self, sample, context=None):\n \"\"\"Normalize the image.\n Operators:\n 1.(optional) Scale the image to [0,1]\n 2. Each pixel minus mean and is divided by std\n \"\"\"\n samples = sample\n batch_input = True\n if not isinstance(samples, Sequence):\n batch_input = False\n samples = [samples]\n for sample in samples:\n for k in sample.keys():\n # hard code\n if k.startswith('image'):\n im = sample[k]\n im = im.astype(np.float32, copy=False)\n if self.is_channel_first:\n mean = np.array(self.mean)[:, np.newaxis, np.newaxis]\n std = np.array(self.std)[:, np.newaxis, np.newaxis]\n else:\n mean = np.array(self.mean)[np.newaxis, np.newaxis, :]\n std = np.array(self.std)[np.newaxis, np.newaxis, :]\n if self.is_scale:\n im = im / 255.0\n im -= mean\n im /= std\n sample[k] = im\n if not batch_input:\n samples = samples[0]\n return samples\n\n\n@register_op\nclass RandomDistort(BaseOperator):\n def __init__(self,\n brightness_lower=0.5,\n brightness_upper=1.5,\n contrast_lower=0.5,\n contrast_upper=1.5,\n saturation_lower=0.5,\n saturation_upper=1.5,\n hue_lower=-18,\n hue_upper=18,\n brightness_prob=0.5,\n contrast_prob=0.5,\n saturation_prob=0.5,\n hue_prob=0.5,\n count=4,\n is_order=False):\n \"\"\"\n Args:\n brightness_lower/ brightness_upper (float): the brightness\n between brightness_lower and brightness_upper\n contrast_lower/ contrast_upper (float): the contrast between\n contrast_lower and contrast_lower\n saturation_lower/ saturation_upper (float): the saturation\n between saturation_lower and saturation_upper\n hue_lower/ hue_upper (float): the hue between\n hue_lower and hue_upper\n brightness_prob (float): the probability of changing brightness\n contrast_prob (float): the probability of changing contrast\n saturation_prob (float): the probability of changing saturation\n hue_prob (float): the probability of changing hue\n count (int): the kinds of doing distrot\n is_order (bool): whether determine the order of distortion\n \"\"\"\n super(RandomDistort, self).__init__()\n self.brightness_lower = brightness_lower\n self.brightness_upper = brightness_upper\n self.contrast_lower = contrast_lower\n self.contrast_upper = contrast_upper\n self.saturation_lower = saturation_lower\n self.saturation_upper = saturation_upper\n self.hue_lower = hue_lower\n self.hue_upper = hue_upper\n self.brightness_prob = brightness_prob\n self.contrast_prob = contrast_prob\n self.saturation_prob = saturation_prob\n self.hue_prob = hue_prob\n self.count = count\n self.is_order = is_order\n\n def random_brightness(self, img):\n brightness_delta = np.random.uniform(self.brightness_lower,\n self.brightness_upper)\n prob = np.random.uniform(0, 1)\n if prob < self.brightness_prob:\n img = ImageEnhance.Brightness(img).enhance(brightness_delta)\n return img\n\n 
def random_contrast(self, img):\n contrast_delta = np.random.uniform(self.contrast_lower,\n self.contrast_upper)\n prob = np.random.uniform(0, 1)\n if prob < self.contrast_prob:\n img = ImageEnhance.Contrast(img).enhance(contrast_delta)\n return img\n\n def random_saturation(self, img):\n saturation_delta = np.random.uniform(self.saturation_lower,\n self.saturation_upper)\n prob = np.random.uniform(0, 1)\n if prob < self.saturation_prob:\n img = ImageEnhance.Color(img).enhance(saturation_delta)\n return img\n\n def random_hue(self, img):\n hue_delta = np.random.uniform(self.hue_lower, self.hue_upper)\n prob = np.random.uniform(0, 1)\n if prob < self.hue_prob:\n img = np.array(img.convert('HSV'))\n img[:, :, 0] = img[:, :, 0] + hue_delta\n img = Image.fromarray(img, mode='HSV').convert('RGB')\n return img\n\n def __call__(self, sample, context):\n \"\"\"Randomly distort the image.\"\"\"\n ops = [\n self.random_brightness, self.random_contrast,\n self.random_saturation, self.random_hue\n ]\n if self.is_order:\n prob = np.random.uniform(0, 1)\n if prob < 0.5:\n ops = [\n self.random_brightness,\n self.random_saturation,\n self.random_hue,\n self.random_contrast,\n ]\n else:\n ops = random.sample(ops, self.count)\n assert 'image' in sample, \"image data not found\"\n im = sample['image']\n im = Image.fromarray(im)\n for id in range(self.count):\n im = ops[id](im)\n im = np.asarray(im)\n sample['image'] = im\n return sample\n\n\n@register_op\nclass ExpandImage(BaseOperator):\n def __init__(self, max_ratio, prob, mean=[127.5, 127.5, 127.5]):\n \"\"\"\n Args:\n max_ratio (float): the maximum ratio to expand the image\n prob (float): the probability of expanding the image\n mean (list): the pixel mean\n \"\"\"\n super(ExpandImage, self).__init__()\n self.max_ratio = max_ratio\n self.mean = mean\n self.prob = prob\n\n def __call__(self, sample, context):\n \"\"\"\n Expand the image and modify bounding box.\n Operators:\n 1. Scale the image width and height.\n 2. Construct new images with new height and width.\n 3. Fill the new image with the mean.\n 4. Put the original image into the new image.\n 5. Rescale the bounding box.\n 6. 
Determine if the new bbox is satisfied in the new image.\n Returns:\n sample: the image, bounding box are replaced.\n \"\"\"\n\n prob = np.random.uniform(0, 1)\n assert 'image' in sample, 'image data not found'\n im = sample['image']\n gt_bbox = sample['gt_bbox']\n gt_class = sample['gt_class']\n im_width = sample['w']\n im_height = sample['h']\n if prob < self.prob:\n if self.max_ratio - 1 >= 0.01:\n expand_ratio = np.random.uniform(1, self.max_ratio)\n height = int(im_height * expand_ratio)\n width = int(im_width * expand_ratio)\n h_off = math.floor(np.random.uniform(0, height - im_height))\n w_off = math.floor(np.random.uniform(0, width - im_width))\n expand_bbox = [\n -w_off / im_width, -h_off / im_height,\n (width - w_off) / im_width, (height - h_off) / im_height\n ]\n expand_im = np.ones((height, width, 3))\n expand_im = np.uint8(expand_im * np.squeeze(self.mean))\n expand_im = Image.fromarray(expand_im)\n im = Image.fromarray(im)\n expand_im.paste(im, (int(w_off), int(h_off)))\n expand_im = np.asarray(expand_im)\n if 'gt_keypoint' in sample.keys(\n ) and 'keypoint_ignore' in sample.keys():\n keypoints = (sample['gt_keypoint'],\n sample['keypoint_ignore'])\n gt_bbox, gt_class, _, gt_keypoints = filter_and_process(\n expand_bbox, gt_bbox, gt_class, keypoints=keypoints)\n sample['gt_keypoint'] = gt_keypoints[0]\n sample['keypoint_ignore'] = gt_keypoints[1]\n else:\n gt_bbox, gt_class, _ = filter_and_process(expand_bbox,\n gt_bbox, gt_class)\n sample['image'] = expand_im\n sample['gt_bbox'] = gt_bbox\n sample['gt_class'] = gt_class\n sample['w'] = width\n sample['h'] = height\n\n return sample\n\n\n@register_op\nclass CropImage(BaseOperator):\n def __init__(self, batch_sampler, satisfy_all=False, avoid_no_bbox=True):\n \"\"\"\n Args:\n batch_sampler (list): Multiple sets of different\n parameters for cropping.\n satisfy_all (bool): whether all boxes must satisfy.\n e.g.[[1, 1, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0],\n [1, 50, 0.3, 1.0, 0.5, 2.0, 0.1, 1.0],\n [1, 50, 0.3, 1.0, 0.5, 2.0, 0.3, 1.0],\n [1, 50, 0.3, 1.0, 0.5, 2.0, 0.5, 1.0],\n [1, 50, 0.3, 1.0, 0.5, 2.0, 0.7, 1.0],\n [1, 50, 0.3, 1.0, 0.5, 2.0, 0.9, 1.0],\n [1, 50, 0.3, 1.0, 0.5, 2.0, 0.0, 1.0]]\n [max sample, max trial, min scale, max scale,\n min aspect ratio, max aspect ratio,\n min overlap, max overlap]\n avoid_no_bbox (bool): whether to avoid the\n situation where the box does not appear.\n \"\"\"\n super(CropImage, self).__init__()\n self.batch_sampler = batch_sampler\n self.satisfy_all = satisfy_all\n self.avoid_no_bbox = avoid_no_bbox\n\n def __call__(self, sample, context):\n \"\"\"\n Crop the image and modify bounding box.\n Operators:\n 1. Scale the image width and height.\n 2. Crop the image according to a random sample.\n 3. Rescale the bounding box.\n 4. 
Determine if the new bbox is satisfied in the new image.\n Returns:\n sample: the image, bounding box are replaced.\n \"\"\"\n assert 'image' in sample, \"image data not found\"\n im = sample['image']\n gt_bbox = sample['gt_bbox']\n gt_class = sample['gt_class']\n im_width = sample['w']\n im_height = sample['h']\n gt_score = None\n if 'gt_score' in sample:\n gt_score = sample['gt_score']\n sampled_bbox = []\n gt_bbox = gt_bbox.tolist()\n for sampler in self.batch_sampler:\n found = 0\n for i in range(sampler[1]):\n if found >= sampler[0]:\n break\n sample_bbox = generate_sample_bbox(sampler)\n if satisfy_sample_constraint(sampler, sample_bbox, gt_bbox,\n self.satisfy_all):\n sampled_bbox.append(sample_bbox)\n found = found + 1\n im = np.array(im)\n while sampled_bbox:\n idx = int(np.random.uniform(0, len(sampled_bbox)))\n sample_bbox = sampled_bbox.pop(idx)\n sample_bbox = clip_bbox(sample_bbox)\n crop_bbox, crop_class, crop_score = \\\n filter_and_process(sample_bbox, gt_bbox, gt_class, scores=gt_score)\n if self.avoid_no_bbox:\n if len(crop_bbox) < 1:\n continue\n xmin = int(sample_bbox[0] * im_width)\n xmax = int(sample_bbox[2] * im_width)\n ymin = int(sample_bbox[1] * im_height)\n ymax = int(sample_bbox[3] * im_height)\n im = im[ymin:ymax, xmin:xmax]\n sample['image'] = im\n sample['gt_bbox'] = crop_bbox\n sample['gt_class'] = crop_class\n sample['gt_score'] = crop_score\n return sample\n return sample\n\n\n@register_op\nclass CropImageWithDataAchorSampling(BaseOperator):\n def __init__(self,\n batch_sampler,\n anchor_sampler=None,\n target_size=None,\n das_anchor_scales=[16, 32, 64, 128],\n sampling_prob=0.5,\n min_size=8.,\n avoid_no_bbox=True):\n \"\"\"\n Args:\n anchor_sampler (list): anchor_sampling sets of different\n parameters for cropping.\n batch_sampler (list): Multiple sets of different\n parameters for cropping.\n e.g.[[1, 10, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.2, 0.0]]\n [[1, 50, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],\n [1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],\n [1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],\n [1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],\n [1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0]]\n [max sample, max trial, min scale, max scale,\n min aspect ratio, max aspect ratio,\n min overlap, max overlap, min coverage, max coverage]\n target_size (int): target image size.\n das_anchor_scales (list[float]): a list of anchor scales in data\n anchor sampling.\n min_size (float): minimum size of sampled bbox.\n avoid_no_bbox (bool): whether to avoid the\n situation where the box does not appear.\n \"\"\"\n super(CropImageWithDataAchorSampling, self).__init__()\n self.anchor_sampler = anchor_sampler\n self.batch_sampler = batch_sampler\n self.target_size = target_size\n self.sampling_prob = sampling_prob\n self.min_size = min_size\n self.avoid_no_bbox = avoid_no_bbox\n self.das_anchor_scales = np.array(das_anchor_scales)\n\n def __call__(self, sample, context):\n \"\"\"\n Crop the image and modify bounding box.\n Operators:\n 1. Scale the image width and height.\n 2. Crop the image according to a random sample.\n 3. Rescale the bounding box.\n 4. 
Determine if the new bbox is satisfied in the new image.\n Returns:\n sample: the image, bounding box are replaced.\n \"\"\"\n assert 'image' in sample, \"image data not found\"\n im = sample['image']\n gt_bbox = sample['gt_bbox']\n gt_class = sample['gt_class']\n image_width = sample['w']\n image_height = sample['h']\n gt_score = None\n if 'gt_score' in sample:\n gt_score = sample['gt_score']\n sampled_bbox = []\n gt_bbox = gt_bbox.tolist()\n\n prob = np.random.uniform(0., 1.)\n if prob > self.sampling_prob: # anchor sampling\n assert self.anchor_sampler\n for sampler in self.anchor_sampler:\n found = 0\n for i in range(sampler[1]):\n if found >= sampler[0]:\n break\n sample_bbox = data_anchor_sampling(\n gt_bbox, image_width, image_height,\n self.das_anchor_scales, self.target_size)\n if sample_bbox == 0:\n break\n if satisfy_sample_constraint_coverage(sampler, sample_bbox,\n gt_bbox):\n sampled_bbox.append(sample_bbox)\n found = found + 1\n im = np.array(im)\n while sampled_bbox:\n idx = int(np.random.uniform(0, len(sampled_bbox)))\n sample_bbox = sampled_bbox.pop(idx)\n\n if 'gt_keypoint' in sample.keys():\n keypoints = (sample['gt_keypoint'],\n sample['keypoint_ignore'])\n crop_bbox, crop_class, crop_score, gt_keypoints = \\\n filter_and_process(sample_bbox, gt_bbox, gt_class,\n scores=gt_score,\n keypoints=keypoints)\n else:\n crop_bbox, crop_class, crop_score = filter_and_process(\n sample_bbox, gt_bbox, gt_class, scores=gt_score)\n crop_bbox, crop_class, crop_score = bbox_area_sampling(\n crop_bbox, crop_class, crop_score, self.target_size,\n self.min_size)\n\n if self.avoid_no_bbox:\n if len(crop_bbox) < 1:\n continue\n im = crop_image_sampling(im, sample_bbox, image_width,\n image_height, self.target_size)\n sample['image'] = im\n sample['gt_bbox'] = crop_bbox\n sample['gt_class'] = crop_class\n sample['gt_score'] = crop_score\n if 'gt_keypoint' in sample.keys():\n sample['gt_keypoint'] = gt_keypoints[0]\n sample['keypoint_ignore'] = gt_keypoints[1]\n return sample\n return sample\n\n else:\n for sampler in self.batch_sampler:\n found = 0\n for i in range(sampler[1]):\n if found >= sampler[0]:\n break\n sample_bbox = generate_sample_bbox_square(\n sampler, image_width, image_height)\n if satisfy_sample_constraint_coverage(sampler, sample_bbox,\n gt_bbox):\n sampled_bbox.append(sample_bbox)\n found = found + 1\n im = np.array(im)\n while sampled_bbox:\n idx = int(np.random.uniform(0, len(sampled_bbox)))\n sample_bbox = sampled_bbox.pop(idx)\n sample_bbox = clip_bbox(sample_bbox)\n\n if 'gt_keypoint' in sample.keys():\n keypoints = (sample['gt_keypoint'],\n sample['keypoint_ignore'])\n crop_bbox, crop_class, crop_score, gt_keypoints = \\\n filter_and_process(sample_bbox, gt_bbox, gt_class,\n scores=gt_score,\n keypoints=keypoints)\n else:\n crop_bbox, crop_class, crop_score = filter_and_process(\n sample_bbox, gt_bbox, gt_class, scores=gt_score)\n # sampling bbox according the bbox area\n crop_bbox, crop_class, crop_score = bbox_area_sampling(\n crop_bbox, crop_class, crop_score, self.target_size,\n self.min_size)\n\n if self.avoid_no_bbox:\n if len(crop_bbox) < 1:\n continue\n xmin = int(sample_bbox[0] * image_width)\n xmax = int(sample_bbox[2] * image_width)\n ymin = int(sample_bbox[1] * image_height)\n ymax = int(sample_bbox[3] * image_height)\n im = im[ymin:ymax, xmin:xmax]\n sample['image'] = im\n sample['gt_bbox'] = crop_bbox\n sample['gt_class'] = crop_class\n sample['gt_score'] = crop_score\n if 'gt_keypoint' in sample.keys():\n sample['gt_keypoint'] = gt_keypoints[0]\n 
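# note: gt_keypoints is the (keypoints, ignore_mask) pair returned by the filter_and_process call above\n 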
sample['keypoint_ignore'] = gt_keypoints[1]\n return sample\n return sample\n\n\n@register_op\nclass NormalizeBox(BaseOperator):\n \"\"\"Transform the bounding box's coordinates to [0,1].\"\"\"\n\n def __init__(self):\n super(NormalizeBox, self).__init__()\n\n def __call__(self, sample, context):\n gt_bbox = sample['gt_bbox']\n width = sample['w']\n height = sample['h']\n for i in range(gt_bbox.shape[0]):\n gt_bbox[i][0] = gt_bbox[i][0] / width\n gt_bbox[i][1] = gt_bbox[i][1] / height\n gt_bbox[i][2] = gt_bbox[i][2] / width\n gt_bbox[i][3] = gt_bbox[i][3] / height\n sample['gt_bbox'] = gt_bbox\n\n if 'gt_keypoint' in sample.keys():\n gt_keypoint = sample['gt_keypoint']\n\n for i in range(gt_keypoint.shape[1]):\n if i % 2:\n gt_keypoint[:, i] = gt_keypoint[:, i] / height\n else:\n gt_keypoint[:, i] = gt_keypoint[:, i] / width\n sample['gt_keypoint'] = gt_keypoint\n\n return sample\n\n\n@register_op\nclass Permute(BaseOperator):\n def __init__(self, to_bgr=True, channel_first=True):\n \"\"\"\n Change the channel layout.\n Args:\n to_bgr (bool): whether to convert RGB to BGR\n channel_first (bool): whether to change the layout from HWC to CHW\n \"\"\"\n super(Permute, self).__init__()\n self.to_bgr = to_bgr\n self.channel_first = channel_first\n if not (isinstance(self.to_bgr, bool) and\n isinstance(self.channel_first, bool)):\n raise TypeError(\"{}: input type is invalid.\".format(self))\n\n def __call__(self, sample, context=None):\n samples = sample\n batch_input = True\n if not isinstance(samples, Sequence):\n batch_input = False\n samples = [samples]\n for sample in samples:\n assert 'image' in sample, \"image data not found\"\n for k in sample.keys():\n # hard code\n if k.startswith('image'):\n im = sample[k]\n if self.channel_first:\n im = np.swapaxes(im, 1, 2)\n im = np.swapaxes(im, 1, 0)\n if self.to_bgr:\n im = im[[2, 1, 0], :, :]\n sample[k] = im\n if not batch_input:\n samples = samples[0]\n return samples\n\n\n@register_op\nclass MixupImage(BaseOperator):\n def __init__(self, alpha=1.5, beta=1.5):\n \"\"\" Mixup image and gt_bbox/gt_score\n Args:\n alpha (float): alpha parameter of the beta distribution\n beta (float): beta parameter of the beta distribution\n \"\"\"\n super(MixupImage, self).__init__()\n self.alpha = alpha\n self.beta = beta\n if self.alpha <= 0.0:\n raise ValueError(\"alpha should be positive in {}\".format(self))\n if self.beta <= 0.0:\n raise ValueError(\"beta should be positive in {}\".format(self))\n\n def _mixup_img(self, img1, img2, factor):\n h = max(img1.shape[0], img2.shape[0])\n w = max(img1.shape[1], img2.shape[1])\n img = np.zeros((h, w, img1.shape[2]), 'float32')\n img[:img1.shape[0], :img1.shape[1], :] = \\\n img1.astype('float32') * factor\n img[:img2.shape[0], :img2.shape[1], :] += \\\n img2.astype('float32') * (1.0 - factor)\n return img.astype('uint8')\n\n def __call__(self, sample, context=None):\n if 'mixup' not in sample:\n return sample\n factor = np.random.beta(self.alpha, self.beta)\n factor = max(0.0, min(1.0, factor))\n if factor >= 1.0:\n sample.pop('mixup')\n return sample\n if factor <= 0.0:\n return sample['mixup']\n im = self._mixup_img(sample['image'], sample['mixup']['image'], factor)\n gt_bbox1 = sample['gt_bbox'].reshape((-1, 4))\n gt_bbox2 = sample['mixup']['gt_bbox'].reshape((-1, 4))\n gt_bbox = np.concatenate((gt_bbox1, gt_bbox2), axis=0)\n gt_class1 = sample['gt_class']\n gt_class2 = sample['mixup']['gt_class']\n gt_class = np.concatenate((gt_class1, gt_class2), axis=0)\n\n gt_score1 = sample['gt_score']\n gt_score2 = 
sample['mixup']['gt_score']\n gt_score = np.concatenate(\n (gt_score1 * factor, gt_score2 * (1. - factor)), axis=0)\n\n is_crowd1 = sample['is_crowd']\n is_crowd2 = sample['mixup']['is_crowd']\n is_crowd = np.concatenate((is_crowd1, is_crowd2), axis=0)\n\n sample['image'] = im\n sample['gt_bbox'] = gt_bbox\n sample['gt_score'] = gt_score\n sample['gt_class'] = gt_class\n sample['is_crowd'] = is_crowd\n sample['h'] = im.shape[0]\n sample['w'] = im.shape[1]\n sample.pop('mixup')\n return sample\n\n\n@register_op\nclass CutmixImage(BaseOperator):\n def __init__(self, alpha=1.5, beta=1.5):\n \"\"\" \n CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features, see https://arxiv.org/abs/1905.04899\n Cutmix image and gt_bbox/gt_score\n Args:\n alpha (float): alpha parameter of the beta distribution\n beta (float): beta parameter of the beta distribution\n \"\"\"\n super(CutmixImage, self).__init__()\n self.alpha = alpha\n self.beta = beta\n if self.alpha <= 0.0:\n raise ValueError(\"alpha should be positive in {}\".format(self))\n if self.beta <= 0.0:\n raise ValueError(\"beta should be positive in {}\".format(self))\n\n def _rand_bbox(self, img1, img2, factor):\n \"\"\" Cut a random box from img2 and paste it onto img1. \"\"\"\n h = max(img1.shape[0], img2.shape[0])\n w = max(img1.shape[1], img2.shape[1])\n cut_rat = np.sqrt(1. - factor)\n\n cut_w = int(w * cut_rat)\n cut_h = int(h * cut_rat)\n\n # uniform\n cx = np.random.randint(w)\n cy = np.random.randint(h)\n\n bbx1 = np.clip(cx - cut_w // 2, 0, w)\n bby1 = np.clip(cy - cut_h // 2, 0, h)\n bbx2 = np.clip(cx + cut_w // 2, 0, w)\n bby2 = np.clip(cy + cut_h // 2, 0, h)\n\n img_1 = np.zeros((h, w, img1.shape[2]), 'float32')\n img_1[:img1.shape[0], :img1.shape[1], :] = \\\n img1.astype('float32')\n img_2 = np.zeros((h, w, img2.shape[2]), 'float32')\n img_2[:img2.shape[0], :img2.shape[1], :] = \\\n img2.astype('float32')\n # paste from the zero-padded copy so the slice is always in range\n img_1[bby1:bby2, bbx1:bbx2, :] = img_2[bby1:bby2, bbx1:bbx2, :]\n return img_1\n\n def __call__(self, sample, context=None):\n if 'cutmix' not in sample:\n return sample\n factor = np.random.beta(self.alpha, self.beta)\n factor = max(0.0, min(1.0, factor))\n if factor >= 1.0:\n sample.pop('cutmix')\n return sample\n if factor <= 0.0:\n return sample['cutmix']\n img1 = sample['image']\n img2 = sample['cutmix']['image']\n img = self._rand_bbox(img1, img2, factor)\n gt_bbox1 = sample['gt_bbox']\n gt_bbox2 = sample['cutmix']['gt_bbox']\n gt_bbox = np.concatenate((gt_bbox1, gt_bbox2), axis=0)\n gt_class1 = sample['gt_class']\n gt_class2 = sample['cutmix']['gt_class']\n gt_class = np.concatenate((gt_class1, gt_class2), axis=0)\n gt_score1 = sample['gt_score']\n gt_score2 = sample['cutmix']['gt_score']\n gt_score = np.concatenate(\n (gt_score1 * factor, gt_score2 * (1. 
- factor)), axis=0)\n sample['image'] = img\n sample['gt_bbox'] = gt_bbox\n sample['gt_score'] = gt_score\n sample['gt_class'] = gt_class\n sample['h'] = img.shape[0]\n sample['w'] = img.shape[1]\n sample.pop('cutmix')\n return sample\n\n\n@register_op\nclass RandomInterpImage(BaseOperator):\n def __init__(self, target_size=0, max_size=0):\n \"\"\"\n Randomly resize the image with one of several interpolation methods.\n Args:\n target_size (int): the target size of image's short side\n max_size (int): the max size of image\n \"\"\"\n super(RandomInterpImage, self).__init__()\n self.target_size = target_size\n self.max_size = max_size\n if not (isinstance(self.target_size, int) and\n isinstance(self.max_size, int)):\n raise TypeError('{}: input type is invalid.'.format(self))\n interps = [\n cv2.INTER_NEAREST,\n cv2.INTER_LINEAR,\n cv2.INTER_AREA,\n cv2.INTER_CUBIC,\n cv2.INTER_LANCZOS4,\n ]\n self.resizers = []\n for interp in interps:\n self.resizers.append(ResizeImage(target_size, max_size, interp))\n\n def __call__(self, sample, context=None):\n \"\"\"Resize the image array with a randomly chosen resizer.\"\"\"\n resizer = random.choice(self.resizers)\n return resizer(sample, context)\n\n\n@register_op\nclass Resize(BaseOperator):\n \"\"\"Resize image and bbox.\n Args:\n target_dim (int or list): target size, can be a single number or a list\n (for random shape).\n interp (int or str): interpolation method, can be an integer or\n 'random' (for randomized interpolation).\n default to `cv2.INTER_LINEAR`.\n \"\"\"\n\n def __init__(self, target_dim=[], interp=cv2.INTER_LINEAR):\n super(Resize, self).__init__()\n self.target_dim = target_dim\n self.interp = interp # 'random' for yolov3\n\n def __call__(self, sample, context=None):\n w = sample['w']\n h = sample['h']\n\n interp = self.interp\n if interp == 'random':\n interp = np.random.choice(range(5))\n\n if isinstance(self.target_dim, Sequence):\n dim = np.random.choice(self.target_dim)\n else:\n dim = self.target_dim\n resize_w = resize_h = dim\n scale_x = dim / w\n scale_y = dim / h\n if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:\n scale_array = np.array([scale_x, scale_y] * 2, dtype=np.float32)\n sample['gt_bbox'] = np.clip(sample['gt_bbox'] * scale_array, 0,\n dim - 1)\n sample['scale_factor'] = [scale_x, scale_y] * 2\n sample['h'] = resize_h\n sample['w'] = resize_w\n\n sample['image'] = cv2.resize(\n sample['image'], (resize_w, resize_h), interpolation=interp)\n return sample\n\n\n@register_op\nclass ColorDistort(BaseOperator):\n \"\"\"Random color distortion.\n Args:\n hue (list): hue settings.\n in [lower, upper, probability] format.\n saturation (list): saturation settings.\n in [lower, upper, probability] format.\n contrast (list): contrast settings.\n in [lower, upper, probability] format.\n brightness (list): brightness settings.\n in [lower, upper, probability] format.\n random_apply (bool): whether to apply in random (yolo) or fixed (SSD)\n order.\n hsv_format (bool): whether to convert color from BGR to HSV\n random_channel (bool): whether to swap channels randomly\n \"\"\"\n\n def __init__(self,\n hue=[-18, 18, 0.5],\n saturation=[0.5, 1.5, 0.5],\n contrast=[0.5, 1.5, 0.5],\n brightness=[0.5, 1.5, 0.5],\n random_apply=True,\n hsv_format=False,\n random_channel=False):\n super(ColorDistort, self).__init__()\n self.hue = hue\n self.saturation = saturation\n self.contrast = contrast\n self.brightness = brightness\n self.random_apply = random_apply\n self.hsv_format = hsv_format\n self.random_channel = random_channel\n\n def apply_hue(self, img):\n 
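# note: each apply_* op below skips its distortion when the uniform draw falls\n # under prob, so the distortion is applied with probability (1 - prob)\n 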
low, high, prob = self.hue\n if np.random.uniform(0., 1.) < prob:\n return img\n\n img = img.astype(np.float32)\n if self.hsv_format:\n img[..., 0] += random.uniform(low, high)\n img[..., 0][img[..., 0] > 360] -= 360\n img[..., 0][img[..., 0] < 0] += 360\n return img\n\n # XXX works, but result differ from HSV version\n delta = np.random.uniform(low, high)\n u = np.cos(delta * np.pi)\n w = np.sin(delta * np.pi)\n bt = np.array([[1.0, 0.0, 0.0], [0.0, u, -w], [0.0, w, u]])\n tyiq = np.array([[0.299, 0.587, 0.114], [0.596, -0.274, -0.321],\n [0.211, -0.523, 0.311]])\n ityiq = np.array([[1.0, 0.956, 0.621], [1.0, -0.272, -0.647],\n [1.0, -1.107, 1.705]])\n t = np.dot(np.dot(ityiq, bt), tyiq).T\n img = np.dot(img, t)\n return img\n\n def apply_saturation(self, img):\n low, high, prob = self.saturation\n if np.random.uniform(0., 1.) < prob:\n return img\n delta = np.random.uniform(low, high)\n img = img.astype(np.float32)\n if self.hsv_format:\n img[..., 1] *= delta\n return img\n gray = img * np.array([[[0.299, 0.587, 0.114]]], dtype=np.float32)\n gray = gray.sum(axis=2, keepdims=True)\n gray *= (1.0 - delta)\n img *= delta\n img += gray\n return img\n\n def apply_contrast(self, img):\n low, high, prob = self.contrast\n if np.random.uniform(0., 1.) < prob:\n return img\n delta = np.random.uniform(low, high)\n\n img = img.astype(np.float32)\n img *= delta\n return img\n\n def apply_brightness(self, img):\n low, high, prob = self.brightness\n if np.random.uniform(0., 1.) < prob:\n return img\n delta = np.random.uniform(low, high)\n\n img = img.astype(np.float32)\n img += delta\n return img\n\n def __call__(self, sample, context=None):\n img = sample['image']\n if self.random_apply:\n functions = [\n self.apply_brightness,\n self.apply_contrast,\n self.apply_saturation,\n self.apply_hue,\n ]\n distortions = np.random.permutation(functions)\n for func in distortions:\n img = func(img)\n sample['image'] = img\n return sample\n\n img = self.apply_brightness(img)\n\n if np.random.randint(0, 2):\n img = self.apply_contrast(img)\n if self.hsv_format:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n img = self.apply_saturation(img)\n img = self.apply_hue(img)\n if self.hsv_format:\n img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)\n else:\n if self.hsv_format:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n img = self.apply_saturation(img)\n img = self.apply_hue(img)\n if self.hsv_format:\n img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)\n img = self.apply_contrast(img)\n\n if self.random_channel:\n if np.random.randint(0, 2):\n img = img[..., np.random.permutation(3)]\n sample['image'] = img\n return sample\n\n\n@register_op\nclass CornerRandColor(ColorDistort):\n \"\"\"Random color for CornerNet series models.\n Args:\n saturation (float): saturation settings.\n contrast (float): contrast settings.\n brightness (float): brightness settings.\n is_scale (bool): whether to scale the input image.\n \"\"\"\n\n def __init__(self,\n saturation=0.4,\n contrast=0.4,\n brightness=0.4,\n is_scale=True):\n super(CornerRandColor, self).__init__(\n saturation=saturation, contrast=contrast, brightness=brightness)\n self.is_scale = is_scale\n\n def apply_saturation(self, img, img_gray):\n alpha = 1. + np.random.uniform(\n low=-self.saturation, high=self.saturation)\n self._blend(alpha, img, img_gray[:, :, None])\n return img\n\n def apply_contrast(self, img, img_gray):\n alpha = 1. 
+ np.random.uniform(low=-self.contrast, high=self.contrast)\n img_mean = img_gray.mean()\n self._blend(alpha, img, img_mean)\n return img\n\n def apply_brightness(self, img, img_gray):\n alpha = 1 + np.random.uniform(\n low=-self.brightness, high=self.brightness)\n img *= alpha\n return img\n\n def _blend(self, alpha, img, img_mean):\n img *= alpha\n img_mean *= (1 - alpha)\n img += img_mean\n\n def __call__(self, sample, context=None):\n img = sample['image']\n if self.is_scale:\n img = img.astype(np.float32, copy=False)\n img /= 255.\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n functions = [\n self.apply_brightness,\n self.apply_contrast,\n self.apply_saturation,\n ]\n distortions = np.random.permutation(functions)\n for func in distortions:\n img = func(img, img_gray)\n sample['image'] = img\n return sample\n\n\n@register_op\nclass NormalizePermute(BaseOperator):\n \"\"\"Normalize and permute channel order.\n Args:\n mean (list): mean values in RGB order.\n std (list): std values in RGB order.\n \"\"\"\n\n def __init__(self,\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.120, 57.375]):\n super(NormalizePermute, self).__init__()\n self.mean = mean\n self.std = std\n\n def __call__(self, sample, context=None):\n img = sample['image']\n img = img.astype(np.float32)\n\n img = img.transpose((2, 0, 1))\n mean = np.array(self.mean, dtype=np.float32)\n std = np.array(self.std, dtype=np.float32)\n invstd = 1. / std\n for v, m, s in zip(img, mean, invstd):\n v.__isub__(m).__imul__(s)\n sample['image'] = img\n return sample\n\n\n@register_op\nclass RandomExpand(BaseOperator):\n \"\"\"Random expand the canvas.\n Args:\n ratio (float): maximum expansion ratio.\n prob (float): probability to expand.\n fill_value (list): color value used to fill the canvas. in RGB order.\n is_mask_expand(bool): whether expand the segmentation.\n \"\"\"\n\n def __init__(self,\n ratio=4.,\n prob=0.5,\n fill_value=(127.5, ) * 3,\n is_mask_expand=False):\n super(RandomExpand, self).__init__()\n assert ratio > 1.01, \"expand ratio must be larger than 1.01\"\n self.ratio = ratio\n self.prob = prob\n assert isinstance(fill_value, (Number, Sequence)), \\\n \"fill value must be either float or sequence\"\n if isinstance(fill_value, Number):\n fill_value = (fill_value, ) * 3\n if not isinstance(fill_value, tuple):\n fill_value = tuple(fill_value)\n self.fill_value = fill_value\n self.is_mask_expand = is_mask_expand\n\n def expand_segms(self, segms, x, y, height, width, ratio):\n def _expand_poly(poly, x, y):\n expanded_poly = np.array(poly)\n expanded_poly[0::2] += x\n expanded_poly[1::2] += y\n return expanded_poly.tolist()\n\n def _expand_rle(rle, x, y, height, width, ratio):\n if 'counts' in rle and type(rle['counts']) == list:\n rle = mask_util.frPyObjects(rle, height, width)\n mask = mask_util.decode(rle)\n expanded_mask = np.full((int(height * ratio), int(width * ratio)),\n 0).astype(mask.dtype)\n expanded_mask[y:y + height, x:x + width] = mask\n rle = mask_util.encode(\n np.array(\n expanded_mask, order='F', dtype=np.uint8))\n return rle\n\n expanded_segms = []\n for segm in segms:\n if is_poly(segm):\n # Polygon format\n expanded_segms.append(\n [_expand_poly(poly, x, y) for poly in segm])\n else:\n # RLE format\n import pycocotools.mask as mask_util\n expanded_segms.append(\n _expand_rle(segm, x, y, height, width, ratio))\n return expanded_segms\n\n def __call__(self, sample, context=None):\n if np.random.uniform(0., 1.) 
< self.prob:\n return sample\n\n img = sample['image']\n height = int(sample['h'])\n width = int(sample['w'])\n\n expand_ratio = np.random.uniform(1., self.ratio)\n h = int(height * expand_ratio)\n w = int(width * expand_ratio)\n if not h > height or not w > width:\n return sample\n y = np.random.randint(0, h - height)\n x = np.random.randint(0, w - width)\n canvas = np.ones((h, w, 3), dtype=np.uint8)\n canvas *= np.array(self.fill_value, dtype=np.uint8)\n canvas[y:y + height, x:x + width, :] = img.astype(np.uint8)\n\n sample['h'] = h\n sample['w'] = w\n sample['image'] = canvas\n if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:\n sample['gt_bbox'] += np.array([x, y] * 2, dtype=np.float32)\n if self.is_mask_expand and 'gt_poly' in sample and len(sample[\n 'gt_poly']) > 0:\n sample['gt_poly'] = self.expand_segms(sample['gt_poly'], x, y,\n height, width, expand_ratio)\n return sample\n\n\n@register_op\nclass RandomCrop(BaseOperator):\n \"\"\"Random crop image and bboxes.\n Args:\n aspect_ratio (list): aspect ratio of cropped region.\n in [min, max] format.\n thresholds (list): iou thresholds for decide a valid bbox crop.\n scaling (list): ratio between a cropped region and the original image.\n in [min, max] format.\n num_attempts (int): number of tries before giving up.\n allow_no_crop (bool): allow return without actually cropping them.\n cover_all_box (bool): ensure all bboxes are covered in the final crop.\n is_mask_crop(bool): whether crop the segmentation.\n \"\"\"\n\n def __init__(self,\n aspect_ratio=[.5, 2.],\n thresholds=[.0, .1, .3, .5, .7, .9],\n scaling=[.3, 1.],\n num_attempts=50,\n allow_no_crop=True,\n cover_all_box=False,\n is_mask_crop=False):\n super(RandomCrop, self).__init__()\n self.aspect_ratio = aspect_ratio\n self.thresholds = thresholds\n self.scaling = scaling\n self.num_attempts = num_attempts\n self.allow_no_crop = allow_no_crop\n self.cover_all_box = cover_all_box\n self.is_mask_crop = is_mask_crop\n\n def crop_segms(self, segms, valid_ids, crop, height, width):\n def _crop_poly(segm, crop):\n xmin, ymin, xmax, ymax = crop\n crop_coord = [xmin, ymin, xmin, ymax, xmax, ymax, xmax, ymin]\n crop_p = np.array(crop_coord).reshape(4, 2)\n crop_p = Polygon(crop_p)\n\n crop_segm = list()\n for poly in segm:\n poly = np.array(poly).reshape(len(poly) // 2, 2)\n polygon = Polygon(poly)\n if not polygon.is_valid:\n exterior = polygon.exterior\n multi_lines = exterior.intersection(exterior)\n polygons = shapely.ops.polygonize(multi_lines)\n polygon = MultiPolygon(polygons)\n multi_polygon = list()\n if isinstance(polygon, MultiPolygon):\n multi_polygon = copy.deepcopy(polygon)\n else:\n multi_polygon.append(copy.deepcopy(polygon))\n for per_polygon in multi_polygon:\n inter = per_polygon.intersection(crop_p)\n if not inter:\n continue\n if isinstance(inter, (MultiPolygon, GeometryCollection)):\n for part in inter:\n if not isinstance(part, Polygon):\n continue\n part = np.squeeze(\n np.array(part.exterior.coords[:-1]).reshape(1,\n -1))\n part[0::2] -= xmin\n part[1::2] -= ymin\n crop_segm.append(part.tolist())\n elif isinstance(inter, Polygon):\n crop_poly = np.squeeze(\n np.array(inter.exterior.coords[:-1]).reshape(1, -1))\n crop_poly[0::2] -= xmin\n crop_poly[1::2] -= ymin\n crop_segm.append(crop_poly.tolist())\n else:\n continue\n return crop_segm\n\n def _crop_rle(rle, crop, height, width):\n if 'counts' in rle and type(rle['counts']) == list:\n rle = mask_util.frPyObjects(rle, height, width)\n mask = mask_util.decode(rle)\n mask = mask[crop[1]:crop[3], 
crop[0]:crop[2]]\n rle = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))\n return rle\n\n crop_segms = []\n for id in valid_ids:\n segm = segms[id]\n if is_poly(segm):\n import copy\n import shapely.ops\n from shapely.geometry import Polygon, MultiPolygon, GeometryCollection\n logging.getLogger(\"shapely\").setLevel(logging.WARNING)\n # Polygon format\n crop_segms.append(_crop_poly(segm, crop))\n else:\n # RLE format\n import pycocotools.mask as mask_util\n crop_segms.append(_crop_rle(segm, crop, height, width))\n return crop_segms\n\n def __call__(self, sample, context=None):\n if 'gt_bbox' in sample and len(sample['gt_bbox']) == 0:\n return sample\n\n h = sample['h']\n w = sample['w']\n gt_bbox = sample['gt_bbox']\n\n # NOTE Original method attempts to generate one candidate for each\n # threshold then randomly sample one from the resulting list.\n # Here a short circuit approach is taken, i.e., randomly choose a\n # threshold and attempt to find a valid crop, and simply return the\n # first one found.\n # The probability is not exactly the same, kinda resembling the\n # \"Monty Hall\" problem. Actually carrying out the attempts will affect\n # observability (just like opening doors in the \"Monty Hall\" game).\n thresholds = list(self.thresholds)\n if self.allow_no_crop:\n thresholds.append('no_crop')\n np.random.shuffle(thresholds)\n\n for thresh in thresholds:\n if thresh == 'no_crop':\n return sample\n\n found = False\n for i in range(self.num_attempts):\n scale = np.random.uniform(*self.scaling)\n if self.aspect_ratio is not None:\n min_ar, max_ar = self.aspect_ratio\n aspect_ratio = np.random.uniform(\n max(min_ar, scale**2), min(max_ar, scale**-2))\n h_scale = scale / np.sqrt(aspect_ratio)\n w_scale = scale * np.sqrt(aspect_ratio)\n else:\n h_scale = np.random.uniform(*self.scaling)\n w_scale = np.random.uniform(*self.scaling)\n crop_h = h * h_scale\n crop_w = w * w_scale\n if self.aspect_ratio is None:\n if crop_h / crop_w < 0.5 or crop_h / crop_w > 2.0:\n continue\n\n crop_h = int(crop_h)\n crop_w = int(crop_w)\n crop_y = np.random.randint(0, h - crop_h)\n crop_x = np.random.randint(0, w - crop_w)\n crop_box = [crop_x, crop_y, crop_x + crop_w, crop_y + crop_h]\n iou = self._iou_matrix(\n gt_bbox, np.array(\n [crop_box], dtype=np.float32))\n if iou.max() < thresh:\n continue\n\n if self.cover_all_box and iou.min() < thresh:\n continue\n\n cropped_box, valid_ids = self._crop_box_with_center_constraint(\n gt_bbox, np.array(\n crop_box, dtype=np.float32))\n if valid_ids.size > 0:\n found = True\n break\n\n if found:\n if self.is_mask_crop and 'gt_poly' in sample and len(sample[\n 'gt_poly']) > 0:\n crop_polys = self.crop_segms(\n sample['gt_poly'],\n valid_ids,\n np.array(\n crop_box, dtype=np.int64),\n h,\n w)\n if [] in crop_polys:\n delete_id = list()\n valid_polys = list()\n for id, crop_poly in enumerate(crop_polys):\n if crop_poly == []:\n delete_id.append(id)\n else:\n valid_polys.append(crop_poly)\n valid_ids = np.delete(valid_ids, delete_id)\n if len(valid_polys) == 0:\n return sample\n sample['gt_poly'] = valid_polys\n else:\n sample['gt_poly'] = crop_polys\n\n if 'gt_segm' in sample:\n sample['gt_segm'] = self._crop_segm(sample['gt_segm'],\n crop_box)\n sample['gt_segm'] = np.take(\n sample['gt_segm'], valid_ids, axis=0)\n sample['image'] = self._crop_image(sample['image'], crop_box)\n sample['gt_bbox'] = np.take(cropped_box, valid_ids, axis=0)\n sample['gt_class'] = np.take(\n sample['gt_class'], valid_ids, axis=0)\n sample['w'] = crop_box[2] - crop_box[0]\n 
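# crop_box holds [x1, y1, x2, y2] in absolute pixels, so width/height are plain differences\n 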
sample['h'] = crop_box[3] - crop_box[1]\n if 'gt_score' in sample:\n sample['gt_score'] = np.take(\n sample['gt_score'], valid_ids, axis=0)\n\n if 'is_crowd' in sample:\n sample['is_crowd'] = np.take(\n sample['is_crowd'], valid_ids, axis=0)\n return sample\n\n return sample\n\n def _iou_matrix(self, a, b):\n tl_i = np.maximum(a[:, np.newaxis, :2], b[:, :2])\n br_i = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])\n\n area_i = np.prod(br_i - tl_i, axis=2) * (tl_i < br_i).all(axis=2)\n area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)\n area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)\n area_o = (area_a[:, np.newaxis] + area_b - area_i)\n return area_i / (area_o + 1e-10)\n\n def _crop_box_with_center_constraint(self, box, crop):\n cropped_box = box.copy()\n\n cropped_box[:, :2] = np.maximum(box[:, :2], crop[:2])\n cropped_box[:, 2:] = np.minimum(box[:, 2:], crop[2:])\n cropped_box[:, :2] -= crop[:2]\n cropped_box[:, 2:] -= crop[:2]\n\n centers = (box[:, :2] + box[:, 2:]) / 2\n valid = np.logical_and(crop[:2] <= centers,\n centers < crop[2:]).all(axis=1)\n valid = np.logical_and(\n valid, (cropped_box[:, :2] < cropped_box[:, 2:]).all(axis=1))\n\n return cropped_box, np.where(valid)[0]\n\n def _crop_image(self, img, crop):\n x1, y1, x2, y2 = crop\n return img[y1:y2, x1:x2, :]\n\n def _crop_segm(self, segm, crop):\n x1, y1, x2, y2 = crop\n return segm[:, y1:y2, x1:x2]\n\n\n@register_op\nclass PadBox(BaseOperator):\n def __init__(self, num_max_boxes=50):\n \"\"\"\n Pad zeros to bboxes if number of bboxes is less than num_max_boxes.\n Args:\n num_max_boxes (int): the max number of bboxes\n \"\"\"\n self.num_max_boxes = num_max_boxes\n super(PadBox, self).__init__()\n\n def __call__(self, sample, context=None):\n assert 'gt_bbox' in sample\n bbox = sample['gt_bbox']\n gt_num = min(self.num_max_boxes, len(bbox))\n num_max = self.num_max_boxes\n fields = context['fields'] if context else []\n pad_bbox = np.zeros((num_max, 4), dtype=np.float32)\n if gt_num > 0:\n pad_bbox[:gt_num, :] = bbox[:gt_num, :]\n sample['gt_bbox'] = pad_bbox\n if 'gt_class' in fields:\n pad_class = np.zeros((num_max), dtype=np.int32)\n if gt_num > 0:\n pad_class[:gt_num] = sample['gt_class'][:gt_num, 0]\n sample['gt_class'] = pad_class\n if 'gt_score' in fields:\n pad_score = np.zeros((num_max), dtype=np.float32)\n if gt_num > 0:\n pad_score[:gt_num] = sample['gt_score'][:gt_num, 0]\n sample['gt_score'] = pad_score\n # in training, for example in op ExpandImage,\n # the bbox and gt_class are expanded, but the difficult flag is not,\n # so judge by its length\n if 'is_difficult' in fields:\n pad_diff = np.zeros((num_max), dtype=np.int32)\n if gt_num > 0:\n pad_diff[:gt_num] = sample['difficult'][:gt_num, 0]\n sample['difficult'] = pad_diff\n return sample\n\n\n@register_op\nclass BboxXYXY2XYWH(BaseOperator):\n \"\"\"\n Convert bbox from XYXY format to [center_x, center_y, w, h] format.\n \"\"\"\n\n def __init__(self):\n super(BboxXYXY2XYWH, self).__init__()\n\n def __call__(self, sample, context=None):\n assert 'gt_bbox' in sample\n bbox = sample['gt_bbox']\n bbox[:, 2:4] = bbox[:, 2:4] - bbox[:, :2]\n bbox[:, :2] = bbox[:, :2] + bbox[:, 2:4] / 2.\n sample['gt_bbox'] = bbox\n return sample\n\n\n@register_op\nclass Lighting(BaseOperator):\n \"\"\"\n Lighting the image by eigenvalues and eigenvectors\n Args:\n eigval (list): eigenvalues\n eigvec (list): eigenvectors\n alphastd (float): random weight of lighting, 0.1 by default\n \"\"\"\n\n def __init__(self, eigval, eigvec, alphastd=0.1):\n super(Lighting, self).__init__()\n self.alphastd = alphastd\n self.eigval = 
np.array(eigval).astype('float32')\n self.eigvec = np.array(eigvec).astype('float32')\n\n def __call__(self, sample, context=None):\n alpha = np.random.normal(scale=self.alphastd, size=(3, ))\n sample['image'] += np.dot(self.eigvec, self.eigval * alpha)\n return sample\n\n\n@register_op\nclass CornerTarget(BaseOperator):\n \"\"\"\n Generate targets for CornerNet by ground truth data. \n Args:\n output_size (int): the size of output heatmaps.\n num_classes (int): num of classes.\n gaussian_bump (bool): whether to apply gaussian bump on gt targets.\n True by default.\n gaussian_rad (int): radius of gaussian bump. If it is set to -1, the \n radius will be calculated by iou. -1 by default.\n gaussian_iou (float): the threshold iou of predicted bbox to gt bbox. \n If the iou is larger than threshold, the predicted bbox is treated as a\n positive sample. 0.3 by default\n max_tag_len (int): max num of gt box per image.\n \"\"\"\n\n def __init__(self,\n output_size,\n num_classes,\n gaussian_bump=True,\n gaussian_rad=-1,\n gaussian_iou=0.3,\n max_tag_len=128):\n super(CornerTarget, self).__init__()\n self.num_classes = num_classes\n self.output_size = output_size\n self.gaussian_bump = gaussian_bump\n self.gaussian_rad = gaussian_rad\n self.gaussian_iou = gaussian_iou\n self.max_tag_len = max_tag_len\n\n def __call__(self, sample, context=None):\n tl_heatmaps = np.zeros(\n (self.num_classes, self.output_size[0], self.output_size[1]),\n dtype=np.float32)\n br_heatmaps = np.zeros(\n (self.num_classes, self.output_size[0], self.output_size[1]),\n dtype=np.float32)\n\n tl_regrs = np.zeros((self.max_tag_len, 2), dtype=np.float32)\n br_regrs = np.zeros((self.max_tag_len, 2), dtype=np.float32)\n tl_tags = np.zeros((self.max_tag_len), dtype=np.int64)\n br_tags = np.zeros((self.max_tag_len), dtype=np.int64)\n tag_masks = np.zeros((self.max_tag_len), dtype=np.uint8)\n tag_lens = np.zeros((), dtype=np.int32)\n tag_nums = np.zeros((1), dtype=np.int32)\n\n gt_bbox = sample['gt_bbox']\n gt_class = sample['gt_class']\n keep_inds  = ((gt_bbox[:, 2] - gt_bbox[:, 0]) > 0) & \\\n ((gt_bbox[:, 3] - gt_bbox[:, 1]) > 0)\n gt_bbox = gt_bbox[keep_inds]\n gt_class = gt_class[keep_inds]\n sample['gt_bbox'] = gt_bbox\n sample['gt_class'] = gt_class\n width_ratio = self.output_size[1] / sample['w']\n height_ratio = self.output_size[0] / sample['h']\n for i in range(gt_bbox.shape[0]):\n width = gt_bbox[i][2] - gt_bbox[i][0]\n height = gt_bbox[i][3] - gt_bbox[i][1]\n\n xtl, ytl = gt_bbox[i][0], gt_bbox[i][1]\n xbr, ybr = gt_bbox[i][2], gt_bbox[i][3]\n\n fxtl = (xtl * width_ratio)\n fytl = (ytl * height_ratio)\n fxbr = (xbr * width_ratio)\n fybr = (ybr * height_ratio)\n\n xtl = int(fxtl)\n ytl = int(fytl)\n xbr = int(fxbr)\n ybr = int(fybr)\n if self.gaussian_bump:\n width = math.ceil(width * width_ratio)\n height = math.ceil(height * height_ratio)\n if self.gaussian_rad == -1:\n radius = gaussian_radius((height, width), self.gaussian_iou)\n radius = max(0, int(radius))\n else:\n radius = self.gaussian_rad\n draw_gaussian(tl_heatmaps[gt_class[i][0]], [xtl, ytl], radius)\n draw_gaussian(br_heatmaps[gt_class[i][0]], [xbr, ybr], radius)\n else:\n tl_heatmaps[gt_class[i][0], ytl, xtl] = 1\n br_heatmaps[gt_class[i][0], ybr, xbr] = 1\n\n tl_regrs[i, :] = [fxtl - xtl, fytl - ytl]\n br_regrs[i, :] = [fxbr - xbr, fybr - ybr]\n tl_tags[tag_lens] = ytl * self.output_size[1] + xtl\n br_tags[tag_lens] = ybr * self.output_size[1] + xbr\n tag_lens += 1\n\n tag_masks[:tag_lens] = 1\n\n sample['tl_heatmaps'] = tl_heatmaps\n sample['br_heatmaps'] = 
br_heatmaps\n sample['tl_regrs'] = tl_regrs\n sample['br_regrs'] = br_regrs\n sample['tl_tags'] = tl_tags\n sample['br_tags'] = br_tags\n sample['tag_masks'] = tag_masks\n\n return sample\n\n\n@register_op\nclass CornerCrop(BaseOperator):\n \"\"\"\n Random crop for CornerNet\n Args:\n random_scales (list): scales of output_size to input_size.\n border (int): border of crop center\n is_train (bool): train or test\n input_size (int): size of input image\n \"\"\"\n\n def __init__(self,\n random_scales=[0.6, 0.7, 0.8, 0.9, 1., 1.1, 1.2, 1.3],\n border=128,\n is_train=True,\n input_size=511):\n super(CornerCrop, self).__init__()\n self.random_scales = random_scales\n self.border = border\n self.is_train = is_train\n self.input_size = input_size\n\n def __call__(self, sample, context=None):\n im_h, im_w = int(sample['h']), int(sample['w'])\n if self.is_train:\n scale = np.random.choice(self.random_scales)\n height = int(self.input_size * scale)\n width = int(self.input_size * scale)\n\n w_border = self._get_border(self.border, im_w)\n h_border = self._get_border(self.border, im_h)\n\n ctx = np.random.randint(low=w_border, high=im_w - w_border)\n cty = np.random.randint(low=h_border, high=im_h - h_border)\n\n else:\n cty, ctx = im_h // 2, im_w // 2\n height = im_h | 127\n width = im_w | 127\n\n cropped_image = np.zeros(\n (height, width, 3), dtype=sample['image'].dtype)\n\n x0, x1 = max(ctx - width // 2, 0), min(ctx + width // 2, im_w)\n y0, y1 = max(cty - height // 2, 0), min(cty + height // 2, im_h)\n\n left_w, right_w = ctx - x0, x1 - ctx\n top_h, bottom_h = cty - y0, y1 - cty\n\n # crop image\n cropped_ctx, cropped_cty = width // 2, height // 2\n x_slice = slice(int(cropped_ctx - left_w), int(cropped_ctx + right_w))\n y_slice = slice(int(cropped_cty - top_h), int(cropped_cty + bottom_h))\n cropped_image[y_slice, x_slice, :] = sample['image'][y0:y1, x0:x1, :]\n\n sample['image'] = cropped_image\n sample['h'], sample['w'] = height, width\n\n if self.is_train:\n # crop detections\n gt_bbox = sample['gt_bbox']\n gt_bbox[:, 0:4:2] -= x0\n gt_bbox[:, 1:4:2] -= y0\n gt_bbox[:, 0:4:2] += cropped_ctx - left_w\n gt_bbox[:, 1:4:2] += cropped_cty - top_h\n else:\n sample['borders'] = np.array(\n [\n cropped_cty - top_h, cropped_cty + bottom_h,\n cropped_ctx - left_w, cropped_ctx + right_w\n ],\n dtype=np.float32)\n\n return sample\n\n def _get_border(self, border, size):\n i = 1\n while size - border // i <= border // i:\n i *= 2\n return border // i\n\n\n@register_op\nclass CornerRatio(BaseOperator):\n \"\"\"\n Ratio of output size to image size\n Args:\n input_size (int): the size of the input image\n output_size (int): the size of heatmap\n \"\"\"\n\n def __init__(self, input_size=511, output_size=64):\n super(CornerRatio, self).__init__()\n self.input_size = input_size\n self.output_size = output_size\n\n def __call__(self, sample, context=None):\n scale = (self.input_size + 1) // self.output_size\n out_height, out_width = (sample['h'] + 1) // scale, (\n sample['w'] + 1) // scale\n height_ratio = out_height / float(sample['h'])\n width_ratio = out_width / float(sample['w'])\n sample['ratios'] = np.array([height_ratio, width_ratio])\n\n return sample\n\n\n@register_op\nclass RandomScaledCrop(BaseOperator):\n \"\"\"Resize image and bbox based on long side (with optional random scaling),\n then crop or pad image to target size.\n Args:\n target_dim (int): target size.\n scale_range (list): random scale range.\n interp (int): interpolation method, default to `cv2.INTER_LINEAR`.\n \"\"\"\n\n def 
__init__(self,\n target_dim=512,\n scale_range=[.1, 2.],\n interp=cv2.INTER_LINEAR):\n super(RandomScaledCrop, self).__init__()\n self.target_dim = target_dim\n self.scale_range = scale_range\n self.interp = interp\n\n def __call__(self, sample, context=None):\n w = sample['w']\n h = sample['h']\n random_scale = np.random.uniform(*self.scale_range)\n dim = self.target_dim\n random_dim = int(dim * random_scale)\n dim_max = max(h, w)\n scale = random_dim / dim_max\n resize_w = int(round(w * scale))\n resize_h = int(round(h * scale))\n offset_x = int(max(0, np.random.uniform(0., resize_w - dim)))\n offset_y = int(max(0, np.random.uniform(0., resize_h - dim)))\n if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:\n scale_array = np.array([scale, scale] * 2, dtype=np.float32)\n shift_array = np.array([offset_x, offset_y] * 2, dtype=np.float32)\n boxes = sample['gt_bbox'] * scale_array - shift_array\n boxes = np.clip(boxes, 0, dim - 1)\n # filter boxes with no area\n area = np.prod(boxes[..., 2:] - boxes[..., :2], axis=1)\n valid = (area > 1.).nonzero()[0]\n sample['gt_bbox'] = boxes[valid]\n sample['gt_class'] = sample['gt_class'][valid]\n\n img = sample['image']\n img = cv2.resize(img, (resize_w, resize_h), interpolation=self.interp)\n img = np.array(img)\n canvas = np.zeros((dim, dim, 3), dtype=img.dtype)\n canvas[:min(dim, resize_h), :min(dim, resize_w), :] = img[\n offset_y:offset_y + dim, offset_x:offset_x + dim, :]\n sample['h'] = dim\n sample['w'] = dim\n sample['image'] = canvas\n sample['im_info'] = [resize_h, resize_w, scale]\n return sample\n\n\n@register_op\nclass ResizeAndPad(BaseOperator):\n \"\"\"Resize image and bbox, then pad image to target size.\n Args:\n target_dim (int): target size\n interp (int): interpolation method, default to `cv2.INTER_LINEAR`.\n \"\"\"\n\n def __init__(self, target_dim=512, interp=cv2.INTER_LINEAR):\n super(ResizeAndPad, self).__init__()\n self.target_dim = target_dim\n self.interp = interp\n\n def __call__(self, sample, context=None):\n w = sample['w']\n h = sample['h']\n interp = self.interp\n dim = self.target_dim\n dim_max = max(h, w)\n scale = self.target_dim / dim_max\n resize_w = int(round(w * scale))\n resize_h = int(round(h * scale))\n if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:\n scale_array = np.array([scale, scale] * 2, dtype=np.float32)\n sample['gt_bbox'] = np.clip(sample['gt_bbox'] * scale_array, 0,\n dim - 1)\n img = sample['image']\n img = cv2.resize(img, (resize_w, resize_h), interpolation=interp)\n img = np.array(img)\n canvas = np.zeros((dim, dim, 3), dtype=img.dtype)\n canvas[:resize_h, :resize_w, :] = img\n sample['h'] = dim\n sample['w'] = dim\n sample['image'] = canvas\n sample['im_info'] = [resize_h, resize_w, scale]\n return sample\n\n\n@register_op\nclass TargetAssign(BaseOperator):\n \"\"\"Assign regression target and labels.\n Args:\n image_size (int or list): input image size, a single integer or list of\n [h, w]. Default: 512\n min_level (int): min level of the feature pyramid. Default: 3\n max_level (int): max level of the feature pyramid. Default: 7\n anchor_base_scale (int): base anchor scale. Default: 4\n num_scales (int): number of anchor scales. Default: 3\n aspect_ratios (list): aspect ratios.\n Default: [(1, 1), (1.4, 0.7), (0.7, 1.4)]\n match_threshold (float): threshold for foreground IoU. 
Default: 0.5\n \"\"\"\n\n def __init__(self,\n image_size=512,\n min_level=3,\n max_level=7,\n anchor_base_scale=4,\n num_scales=3,\n aspect_ratios=[(1, 1), (1.4, 0.7), (0.7, 1.4)],\n match_threshold=0.5):\n super(TargetAssign, self).__init__()\n assert image_size % 2 ** max_level == 0, \\\n \"image size should be multiple of the max level stride\"\n self.image_size = image_size\n self.min_level = min_level\n self.max_level = max_level\n self.anchor_base_scale = anchor_base_scale\n self.num_scales = num_scales\n self.aspect_ratios = aspect_ratios\n self.match_threshold = match_threshold\n\n @property\n def anchors(self):\n if not hasattr(self, '_anchors'):\n anchor_grid = AnchorGrid(self.image_size, self.min_level,\n self.max_level, self.anchor_base_scale,\n self.num_scales, self.aspect_ratios)\n self._anchors = np.concatenate(anchor_grid.generate())\n return self._anchors\n\n def iou_matrix(self, a, b):\n tl_i = np.maximum(a[:, np.newaxis, :2], b[:, :2])\n br_i = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])\n area_i = np.prod(br_i - tl_i, axis=2) * (tl_i < br_i).all(axis=2)\n area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)\n area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)\n area_o = (area_a[:, np.newaxis] + area_b - area_i)\n # return area_i / (area_o + 1e-10)\n return np.where(area_i == 0., np.zeros_like(area_i), area_i / area_o)\n\n def match(self, anchors, gt_boxes):\n # XXX put smaller matrix first would be a little bit faster\n mat = self.iou_matrix(gt_boxes, anchors)\n max_anchor_for_each_gt = mat.argmax(axis=1)\n max_for_each_anchor = mat.max(axis=0)\n anchor_to_gt = mat.argmax(axis=0)\n anchor_to_gt[max_for_each_anchor < self.match_threshold] = -1\n # XXX ensure each gt has at least one anchor assigned,\n # see `force_match_for_each_row` in TF implementation\n one_hot = np.zeros_like(mat)\n one_hot[np.arange(mat.shape[0]), max_anchor_for_each_gt] = 1.\n max_anchor_indices = one_hot.sum(axis=0).nonzero()[0]\n max_gt_indices = one_hot.argmax(axis=0)[max_anchor_indices]\n anchor_to_gt[max_anchor_indices] = max_gt_indices\n return anchor_to_gt\n\n def encode(self, anchors, boxes):\n wha = anchors[..., 2:] - anchors[..., :2] + 1\n ca = anchors[..., :2] + wha * .5\n whb = boxes[..., 2:] - boxes[..., :2] + 1\n cb = boxes[..., :2] + whb * .5\n offsets = np.empty_like(anchors)\n offsets[..., :2] = (cb - ca) / wha\n offsets[..., 2:] = np.log(whb / wha)\n return offsets\n\n def __call__(self, sample, context=None):\n gt_boxes = sample['gt_bbox']\n gt_labels = sample['gt_class']\n labels = np.full((self.anchors.shape[0], 1), 0, dtype=np.int32)\n targets = np.full((self.anchors.shape[0], 4), 0., dtype=np.float32)\n sample['gt_label'] = labels\n sample['gt_target'] = targets\n\n if len(gt_boxes) < 1:\n sample['fg_num'] = np.array(0, dtype=np.int32)\n return sample\n\n anchor_to_gt = self.match(self.anchors, gt_boxes)\n matched_indices = (anchor_to_gt >= 0).nonzero()[0]\n labels[matched_indices] = gt_labels[anchor_to_gt[matched_indices]]\n\n matched_boxes = gt_boxes[anchor_to_gt[matched_indices]]\n matched_anchors = self.anchors[matched_indices]\n matched_targets = self.encode(matched_anchors, matched_boxes)\n targets[matched_indices] = matched_targets\n sample['fg_num'] = np.array(len(matched_targets), dtype=np.int32)\n return sample\n\n\n@register_op\nclass DebugVisibleImage(BaseOperator):\n \"\"\"\n In debug mode, visualize images according to `gt_box`.\n (Currently only supported when not cropping and flipping image.)\n \"\"\"\n\n def __init__(self,\n output_dir='output/debug',\n use_vdl=False,\n 
is_normalized=False):\n super(DebugVisibleImage, self).__init__()\n self.is_normalized = is_normalized\n self.output_dir = output_dir\n self.use_vdl = use_vdl\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n if not isinstance(self.is_normalized, bool):\n raise TypeError(\"{}: input type is invalid.\".format(self))\n if self.use_vdl:\n assert six.PY3, \"VisualDL requires Python >= 3.5\"\n from visualdl import LogWriter\n self.vdl_writer = LogWriter(self.output_dir)\n\n def __call__(self, sample, context=None):\n out_file_name = sample['im_file'].split('/')[-1]\n if self.use_vdl:\n origin_image = Image.open(sample['im_file']).convert('RGB')\n origin_image = ImageOps.exif_transpose(origin_image)\n image_np = np.array(origin_image)\n self.vdl_writer.add_image(\"original/{}\".format(out_file_name),\n image_np, 0)\n\n if not isinstance(sample['image'], np.ndarray):\n raise TypeError(\"{}: sample[image] type is not numpy.\".format(self))\n image = Image.fromarray(np.uint8(sample['image']))\n\n width = sample['w']\n height = sample['h']\n gt_bbox = sample['gt_bbox']\n gt_class = sample['gt_class']\n\n if 'gt_poly' in sample.keys():\n poly_to_mask = Poly2Mask()\n sample = poly_to_mask(sample)\n\n if 'gt_segm' in sample.keys():\n import pycocotools.mask as mask_util\n from ppdet.utils.colormap import colormap\n image_np = np.array(image).astype('float32')\n mask_color_id = 0\n w_ratio = .4\n alpha = 0.7\n color_list = colormap(rgb=True)\n gt_segm = sample['gt_segm']\n for mask in gt_segm:\n color_mask = color_list[mask_color_id % len(color_list), 0:3]\n mask_color_id += 1\n for c in range(3):\n color_mask[c] = color_mask[c] * (1 - w_ratio\n ) + w_ratio * 255\n idx = np.nonzero(mask)\n image_np[idx[0], idx[1], :] *= 1.0 - alpha\n image_np[idx[0], idx[1], :] += alpha * color_mask\n image = Image.fromarray(np.uint8(image_np))\n\n draw = ImageDraw.Draw(image)\n for i in range(gt_bbox.shape[0]):\n if self.is_normalized:\n gt_bbox[i][0] = gt_bbox[i][0] * width\n gt_bbox[i][1] = gt_bbox[i][1] * height\n gt_bbox[i][2] = gt_bbox[i][2] * width\n gt_bbox[i][3] = gt_bbox[i][3] * height\n\n xmin, ymin, xmax, ymax = gt_bbox[i]\n draw.line(\n [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin),\n (xmin, ymin)],\n width=2,\n fill='green')\n # draw label\n text = 'id' + str(gt_class[i][0])\n tw, th = draw.textsize(text)\n draw.rectangle(\n [(xmin + 1, ymin - th), (xmin + tw + 1, ymin)], fill='green')\n draw.text((xmin + 1, ymin - th), text, fill=(255, 255, 255))\n\n if 'gt_keypoint' in sample.keys():\n gt_keypoint = sample['gt_keypoint']\n if self.is_normalized:\n for i in range(gt_keypoint.shape[1]):\n if i % 2:\n gt_keypoint[:, i] = gt_keypoint[:, i] * height\n else:\n gt_keypoint[:, i] = gt_keypoint[:, i] * width\n for i in range(gt_keypoint.shape[0]):\n keypoint = gt_keypoint[i]\n for j in range(int(keypoint.shape[0] / 2)):\n x1 = round(keypoint[2 * j])\n y1 = round(keypoint[2 * j + 1])\n draw.ellipse(\n (x1, y1, x1 + 5, y1 + 5), fill='green', outline='green')\n save_path = os.path.join(self.output_dir, out_file_name)\n if self.use_vdl:\n preprocess_image_np = np.array(image)\n self.vdl_writer.add_image(\"preprocess/{}\".format(out_file_name),\n preprocess_image_np, 0)\n else:\n image.save(save_path, quality=95)\n return sample\n\n\n@register_op\nclass Poly2Mask(BaseOperator):\n \"\"\"\n gt poly to mask annotations\n \"\"\"\n\n def __init__(self):\n super(Poly2Mask, self).__init__()\n import pycocotools.mask as maskUtils\n self.maskutils = maskUtils\n\n def _poly2mask(self, mask_ann, img_h, 
img_w):\n if isinstance(mask_ann, list):\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = self.maskutils.frPyObjects(mask_ann, img_h, img_w)\n rle = self.maskutils.merge(rles)\n elif isinstance(mask_ann['counts'], list):\n # uncompressed RLE\n rle = self.maskutils.frPyObjects(mask_ann, img_h, img_w)\n else:\n # rle\n rle = mask_ann\n mask = self.maskutils.decode(rle)\n return mask\n\n def __call__(self, sample, context=None):\n assert 'gt_poly' in sample\n im_h = sample['h']\n im_w = sample['w']\n masks = [\n self._poly2mask(gt_poly, im_h, im_w)\n for gt_poly in sample['gt_poly']\n ]\n sample['gt_segm'] = np.asarray(masks).astype(np.uint8)\n return sample\n" ]
[ [ "numpy.ones", "numpy.take", "numpy.ones_like", "numpy.asarray", "numpy.log", "numpy.full", "numpy.concatenate", "numpy.logical_and", "numpy.cos", "numpy.random.choice", "numpy.empty_like", "numpy.expand_dims", "numpy.random.rand", "numpy.delete", "numpy.where", "numpy.round", "numpy.nonzero", "numpy.minimum", "numpy.random.uniform", "numpy.sqrt", "numpy.int", "numpy.uint8", "numpy.zeros", "numpy.random.normal", "numpy.arange", "numpy.max", "numpy.min", "numpy.prod", "numpy.maximum", "numpy.zeros_like", "numpy.random.shuffle", "numpy.squeeze", "numpy.random.permutation", "numpy.swapaxes", "numpy.random.beta", "numpy.clip", "numpy.array", "numpy.sin", "numpy.dot", "numpy.random.randint", "numpy.frombuffer" ] ]
double-fire-0/SystemNoise
[ "ab042dd54371482a18117eb13f816a7472e51590" ]
[ "EOC/prototype/data/utils/imagenet_s_gen.py" ]
[ "import os.path as osp\nimport numpy as np\nfrom PIL import Image\nimport io\nimport cv2\nimport ffmpeg\nimport copy\nimport math\nimport random\nimport os\nfrom tqdm import tqdm\n# try:\n# import mc\n# except ImportError:\n# pass\nimport argparse\n\n\npil_resize_mode_dict = {\n \"pil-bilinear\": Image.BILINEAR,\n \"pil-nearest\": Image.NEAREST,\n \"pil-box\": Image.BOX,\n \"pil-hamming\": Image.HAMMING,\n \"pil-cubic\": Image.BICUBIC,\n \"pil-lanczos\": Image.LANCZOS\n}\n\ncv_resize_mode_dict = {\n \"opencv-nearest\": cv2.INTER_NEAREST,\n \"opencv-bilinear\": cv2.INTER_LINEAR,\n \"opencv-area\": cv2.INTER_AREA,\n \"opencv-cubic\": cv2.INTER_CUBIC,\n \"opencv-lanczos\": cv2.INTER_LANCZOS4\n}\n\n\nclass ImageTransfer:\n def __init__(self, root_dir, meta_file, save_root, decoder_type='pil',\n resize_type='pil-bilinear', resize=224, transform_type='val'):\n self.root_dir = root_dir\n self.meta_file = meta_file\n self.decoder_type = decoder_type\n self.resize_type = resize_type\n self.save_root = save_root\n self.transform_type = transform_type\n\n if isinstance(resize, tuple):\n self.resize = resize\n else:\n self.resize = (resize, resize)\n self.color_mode = 'RGB'\n\n with open(meta_file) as f:\n lines = f.readlines()\n self.num = len(lines)\n self.metas = []\n for line in lines:\n filename, label = line.rstrip().split()\n self.metas.append({'filename': filename, 'label': label})\n\n def write_to_filesystem(self):\n new_meta_file_name = self.decoder_type + '_' + self.resize_type + '.txt'\n new_meta_file = open(new_meta_file_name, 'w')\n save_dir = osp.join(self.save_root, self.decoder_type, self.resize_type)\n if not osp.exists(save_dir):\n os.makedirs(save_dir)\n\n for idx in tqdm(range(self.num)):\n np_image, label = self.getimage(idx)\n save_file_name = self.metas[idx]['filename'] + '.npy'\n save_path = osp.join(save_dir, save_file_name)\n np.save(save_path, np_image)\n\n new_meta_file.write(f'{osp.join(self.decoder_type, self.resize_type, save_file_name)} {label}'+'\\n')\n\n def getimage(self, idx):\n curr_meta = copy.deepcopy(self.metas[idx])\n filename = osp.join(self.root_dir, curr_meta['filename'])\n label = int(curr_meta['label'])\n # add root_dir to filename\n curr_meta['filename'] = filename\n img_bytes = self.read_file(curr_meta)\n\n img_after_decode = self.image_decoder(img_bytes, filepath=filename)\n assert isinstance(img_after_decode, np.ndarray)\n\n y, x, h, w = self.get_params(img_after_decode)\n img_after_resize = self.image_resize(img_after_decode, y, x, h, w)\n\n return img_after_resize, label\n\n\n def image_resize(self, img, y, x, h, w):\n if 'pil' in self.resize_type:\n img = self.toPIL(img)\n interpolation = pil_resize_mode_dict[self.resize_type]\n elif 'opencv' in self.resize_type:\n interpolation = cv_resize_mode_dict[self.resize_type]\n else:\n raise NotImplementedError\n\n if self.transform_type == 'train':\n i, j = y, x\n size = self.resize\n if 'pil' in self.resize_type:\n img = img.crop((j, i, j + w, i + h))\n return self.toNumpy(self.PIL_resize(img, size, interpolation))\n elif 'opencv' in self.resize_type:\n img = img[y: y + h, x: x + w]\n img = cv2.resize(img, self.resize, interpolation=interpolation)\n return img\n else:\n raise NotImplementedError\n elif self.transform_type == 'val':\n if 'pil' in self.resize_type:\n frist_resize = tuple(size * 8 / 7 for size in self.resize)\n img = self.PIL_resize(img, frist_resize, interpolation)\n\n w, h = img.size\n th, tw = self.resize\n i = int(round((h - th) / 2.))\n j = int(round((w - tw) / 2.))\n img = 
img.crop((j, i, j + tw, i + th))\n return self.toNumpy(img)\n elif 'opencv' in self.resize_type:\n width, height = tuple(int(size * 8 / 7) for size in self.resize)\n img = cv2.resize(img, (width, height), interpolation=interpolation)\n\n h, w, c = img.shape\n th, tw = self.resize\n dy = int(round((h - th) / 2.))\n dx = int(round((w - tw) / 2.))\n return img[dy: dy + th, dx: dx + tw]\n else:\n raise NotImplementedError\n\n\n\n\n def PIL_resize(self, img, size, interpolation):\n if isinstance(size, int):\n w, h = img.size\n if (w <= h and w == size) or (h <= w and h == size):\n return img\n if w < h:\n ow = size\n oh = int(size * h / w)\n return img.resize((ow, oh), interpolation)\n else:\n oh = size\n ow = int(size * w / h)\n return img.resize((ow, oh), interpolation)\n else:\n return img.resize(size[::-1], interpolation)\n\n\n\n def toNumpy(self, img):\n return np.asarray(img)\n\n def toPIL(self, img):\n return Image.fromarray(img)\n\n def image_decoder(self, filebytes, filepath=None):\n if self.decoder_type == 'pil':\n buff = io.BytesIO(filebytes)\n try:\n with Image.open(buff) as img:\n img = img.convert('RGB')\n if self.color_mode == \"BGR\":\n b, g, r = img.split()\n img = Image.merge(\"RGB\", (r, g, b))\n elif self.color_mode == \"GRAY\":\n img = img.convert('L')\n\n except IOError:\n print('Failed in loading {}'.format(filepath))\n image_array = np.array(img)\n return image_array\n elif self.decoder_type == 'opencv':\n try:\n img = cv2.imdecode(filebytes, cv2.IMREAD_COLOR)\n if self.color_mode == \"RGB\":\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n elif self.color_mode == \"GRAY\":\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n except IOError:\n print('Failed in loading {}'.format(filepath))\n return img\n elif self.decoder_type == 'ffmpeg':\n img = cv2.imdecode(filebytes, cv2.IMREAD_COLOR)\n height = img.shape[0]\n width = img.shape[1]\n out, _ = (\n ffmpeg\n .input(filepath)\n .output('pipe:', format='rawvideo', pix_fmt='rgb24')\n .run(capture_stdout=True)\n )\n img = (\n np\n .frombuffer(out, np.uint8)\n .reshape([height, width, 3])\n )\n return img\n else:\n raise NotImplementedError\n\n def get_params(self, img, scale=(0.08, 1.0), ratio=(3. / 4., 4. 
/ 3.)):\n \"\"\"Get parameters for ``crop`` for a random sized crop.\n\n Args:\n img (PIL Image): Image to be cropped.\n scale (tuple): range of size of the origin size cropped\n ratio (tuple): range of aspect ratio of the origin aspect ratio cropped\n\n Returns:\n tuple: params (i, j, h, w) to be passed to ``crop`` for a random\n sized crop.\n \"\"\"\n\n area = img.shape[0] * img.shape[1]\n height, width = img.shape[0], img.shape[1]\n\n for attempt in range(10):\n target_area = random.uniform(*scale) * area\n log_ratio = (math.log(ratio[0]), math.log(ratio[1]))\n aspect_ratio = math.exp(random.uniform(*log_ratio))\n\n w = int(round(math.sqrt(target_area * aspect_ratio)))\n h = int(round(math.sqrt(target_area / aspect_ratio)))\n\n if 0 < w <= width and 0 < h <= height:\n i = random.randint(0, height - h)\n j = random.randint(0, width - w)\n return i, j, h, w\n\n in_ratio = float(width) / float(height)\n if (in_ratio < min(ratio)):\n w = width\n h = int(round(w / min(ratio)))\n elif (in_ratio > max(ratio)):\n h = height\n w = int(round(h * max(ratio)))\n else: # whole image\n w = width\n h = height\n i = (height - h) // 2\n j = (width - w) // 2\n return i, j, h, w\n\n def read_file(self, meta_dict):\n filebytes = np.fromfile(meta_dict['filename'], dtype=np.uint8)\n return filebytes\n # self._init_memcached()\n # value = mc.pyvector()\n # self.mclient.Get(meta_dict['filename'], value)\n # value_str = mc.ConvertBuffer(value)\n # filebytes = np.frombuffer(value_str.tobytes(), dtype=np.uint8)\n # return filebytes\n #\n # def _init_memcached(self):\n # if not self.initialized:\n # server_list_config_file = \"/mnt/lustre/share/memcached_client/server_list.conf\"\n # client_config_file = \"/mnt/lustre/share/memcached_client/client.conf\"\n # self.mclient = mc.MemcachedClient.GetInstance(server_list_config_file, client_config_file)\n # self.initialized = True\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Generate Dataset')\n parser.add_argument('--decoder', required=False, type=str, default='pil', choices=['pil', 'opencv', 'ffmpeg'])\n parser.add_argument('--resize', required=False, type=str, default='pil-bilinear',\n choices=['pil-bilinear', 'pil-nearest', 'pil-box', 'pil-hamming', 'pil-cubic', 'pil-lanczos',\n 'opencv-nearest', 'opencv-bilinear', 'opencv-area', 'opencv-cubic', 'opencv-lanczos'])\n parser.add_argument('--transform-type', required=False, type=str, default='val', choices=['val', 'train'])\n # train: Random Resize Crop\n # val: Resize (outsize * (8/7)) + Center Crop\n\n args = parser.parse_args()\n\n ImageTransfer(root_dir='/mnt/lustre/share/images/val', meta_file='/meta/val.txt',\n save_root='/mnt/lustre/wangyan3/dataset-decoder-resize', decoder_type=args.decoder,\n transform_type=args.transform_type, resize_type=args.resize).write_to_filesystem()\n\n" ]
[ [ "numpy.save", "numpy.fromfile", "numpy.asarray", "numpy.array", "numpy.frombuffer" ] ]
zehuilu/How-to-Use-Qualisys-Motion-Capture-System-in-AIMS-Lab
[ "862860a9a5d28fc60ee01954e4929a908bf80533" ]
[ "python/streaming_6dof_data.py" ]
[ "#!/usr/bin/python3\n\n\"\"\"\n Streaming 6-DOF data from QTM forever\n (start QTM first, Capture->Continuous Capture)\n\"\"\"\n\nimport asyncio\nimport xml.etree.ElementTree as ET\nimport pkg_resources\nimport qtm\nimport json\nimport numpy as np\nimport socket\n\n\ndef create_body_index(xml_string):\n \"\"\" Extract a name to index dictionary from 6-DOF settings xml \"\"\"\n xml = ET.fromstring(xml_string)\n\n body_to_index = {}\n for index, body in enumerate(xml.findall(\"*/Body/Name\")):\n body_to_index[body.text.strip()] = index\n\n return body_to_index\n\n\ndef publisher_udp_main(json_file_data):\n \"\"\"\n The following two lines show what is json_file_data\n\n json_file = open('mocap_config.json')\n json_file_data = json.load(json_file)\n \"\"\"\n\n # IP for publisher\n HOST_UDP = json_file_data['HOST_UDP']\n # Port for publisher\n PORT_UDP = int(json_file_data['PORT_UDP'])\n\n server_address_udp = (HOST_UDP, PORT_UDP)\n # Create a UDP socket\n sock_udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n return sock_udp, server_address_udp\n\n\nasync def main(network_config_file_name):\n # Read the configuration from the json file\n json_file = open(network_config_file_name)\n json_file_data = json.load(json_file)\n\n # 1 for realtime streaming, 0 for loading qtm file\n flag_realtime = int(json_file_data['FLAG_REALTIME'])\n\n # IP address for the mocap server\n IP_server = json_file_data['IP_SERVER']\n\n # If you want to stream recorded data in a real-time way, change json file and load it here.\n # There might be a bug about file path. Will test it later. -- Sept. 08, 2020\n file_name_qtm = json_file_data['NAME_FILE_LOADED_QTM']\n QTM_FILE = pkg_resources.resource_filename(\"qtm\", file_name_qtm)\n\n # Connect to qtm\n connection = await qtm.connect(IP_server)\n\n # Connection failed?\n if connection is None:\n print(\"Failed to connect\")\n return\n\n # Take control of qtm, context manager will automatically release control after scope end\n async with qtm.TakeControl(connection, \"password\"):\n if not flag_realtime:\n # Load qtm file\n await connection.load(QTM_FILE)\n # start rtfromfile\n await connection.start(rtfromfile=True)\n\n # Get 6-DOF settings from QTM\n xml_string = await connection.get_parameters(parameters=[\"6d\"])\n\n # Create a UDP socket for data streaming\n sock_udp, server_address_udp = publisher_udp_main(json_file_data)\n\n # parser for mocap rigid bodies indexing\n body_index = create_body_index(xml_string)\n\n wanted_body = json_file_data['NAME_SINGLE_BODY']\n\n def on_packet(packet):\n # Get the 6-DOF data\n bodies = packet.get_6d()[1]\n\n if wanted_body is not None and wanted_body in body_index:\n # Extract one specific body\n wanted_index = body_index[wanted_body]\n position, rotation = bodies[wanted_index]\n # You can use position and rotation here. 
Notice that the unit for position is mm!\n            print(wanted_body)\n\n            print(\"Position in numpy [meter]\")\n            position_np = np.array([[position.x/1000.0], [position.y/1000.0], [position.z/1000.0]], dtype=np.float64)\n            print(position_np)\n\n            # rotation.matrix is a tuple with 9 elements.\n            print(\"Rotation matrix in numpy\")\n            rotation_np = np.asarray(rotation.matrix, dtype=np.float64).reshape(3, 3)\n            print(rotation_np)\n\n            # send 6-DOF data via UDP\n            # pack the position and the flattened rotation matrix into one message\n            msg = np.asarray((position.x/1000.0, position.y/1000.0, position.z/1000.0) + rotation.matrix, dtype=np.float64).tobytes()\n            sock_udp.sendto(msg, server_address_udp)\n            print(\"6-DOF data sent via UDP!\")\n\n        else:\n            # No match for the wanted body; print every tracked body instead\n            print(\"There is no such rigid body! Printing all bodies.\")\n            for position, rotation in bodies:\n                print(\"Pos: {} - Rot: {}\".format(position, rotation))\n\n    # Start streaming frames\n    # Make sure the component matches with the data fetch function, for example: packet.get_6d() with \"6d\"\n    # Reference: https://qualisys.github.io/qualisys_python_sdk/index.html\n    await connection.stream_frames(components=[\"6d\"], on_packet=on_packet)\n\n\nif __name__ == \"__main__\":\n    network_config_file_name = 'mocap_config.json'\n    # Run our asynchronous main function forever\n    asyncio.ensure_future(main(network_config_file_name))\n    asyncio.get_event_loop().run_forever()\n" ]
[ [ "numpy.array", "numpy.asarray" ] ]
markmac99/WesternMeteorPyLib
[ "c5104974c3f1e2259b0d0ea63a9bbaa15d236be2" ]
[ "wmpl/Trajectory/Orbit.py" ]
[ "from __future__ import print_function, division, absolute_import\n\nimport os\nimport sys\nimport datetime\nimport argparse\n\nimport numpy as np\n\nfrom jplephem.spk import SPK\n\n\nfrom wmpl.Config import config\n\nfrom wmpl.Utils.Earth import calcEarthRectangularCoordJPL\nfrom wmpl.Utils.ShowerAssociation import associateShower\nfrom wmpl.Utils.SolarLongitude import jd2SolLonJPL\nfrom wmpl.Utils.TrajConversions import J2000_JD, J2000_OBLIQUITY, AU, SUN_MU, SUN_MASS, G, SIDEREAL_YEAR, \\\n jd2LST, jd2Date, jd2DynamicalTimeJD, eci2RaDec, altAz2RADec, raDec2AltAz, raDec2Ecliptic, cartesian2Geo,\\\n equatorialCoordPrecession, eclipticToRectangularVelocityVect, correctedEclipticCoord, datetime2JD, \\\n geo2Cartesian\nfrom wmpl.Utils.Math import vectNorm, vectMag, rotateVector, cartesianToSpherical, sphericalToCartesian\nfrom wmpl.Utils.Pickling import loadPickle\n\n\n\nclass Orbit(object):\n \"\"\" Structure for storing the orbit solution of a meteor. \"\"\"\n\n def __init__(self):\n\n\n ### Apparent radiant in ECI (Earth's rotation is included) ###\n\n # Apparent radiant position (ECI, radians)\n self.ra = None\n self.dec = None\n\n # Apparent azimuth and altitude (ECI)\n self.azimuth_apparent = None\n self.elevation_apparent = None\n\n # Estimated average velocity (ECI)\n self.v_avg = None\n\n # Estimated initial velocity (ECI)\n self.v_init = None\n\n ### ###\n\n\n\n ### Apparent radiant which includes no Earth's rotation (reference to the ground) ###\n\n # Apparent radiant position (no Earth's rotation, radians)\n self.ra_norot = None\n self.dec_norot = None\n\n # Apparent azimuth and altitude (no Earth's rotation)\n self.azimuth_apparent_norot = None\n self.elevation_apparent_norot = None\n\n # Estimated average velocity (no Earth's rotation)\n self.v_avg_norot = None\n\n # Estimated initial velocity (no Earth's rotation)\n self.v_init_norot = None\n\n ### ###\n\n\n\n # Reference Julian date for the trajectory. 
Can be the time of the first point on the trajectory or the\n # average time of the meteor\n self.jd_ref = None\n\n # Dynamical Julian date\n self.jd_dyn = None\n\n # reference Local Sidreal Time of the reference trajectory position\n self.lst_ref = None\n\n # Longitude of the reference point on the trajectory (rad)\n self.lon_ref = None\n\n # Latitude of the reference point on the trajectory (rad)\n self.lat_ref = None\n\n # Height of the reference point on the trajectory (meters)\n self.ht_ref = None\n\n # Geocentric latitude of the reference point on the trajectory (rad)\n self.lat_geocentric = None\n\n # Apparent zenith angle (before the correction for Earth's gravity)\n self.zc = None\n\n # Zenith distance of the geocentric radiant (after the correction for Earth's gravity)\n self.zg = None\n\n # Velocity at infinity\n self.v_inf = None\n\n # Geocentric velocity (m/s)\n self.v_g = None\n\n # Geocentric radiant position (radians)\n self.ra_g = None\n self.dec_g = None\n\n # Ecliptic coordinates of the radiant (radians)\n self.L_g = None\n self.B_g = None\n\n # Sun-centered ecliptic rectangular coordinates of the average position on the meteor's trajectory \n # (in kilometers)\n self.meteor_pos = None\n\n # Helioventric velocity of the meteor (m/s)\n self.v_h = None\n\n # Components of the heliocentric velocity vector of the meteoroid\n self.v_h_x = None\n self.v_h_y = None\n self.v_h_z = None\n\n # Heliocentric ecliptic coordinates of the meteor\n self.L_h = None\n self.B_h = None\n\n # Solar longitude (radians)\n self.la_sun = None\n\n # Semi-major axis (AU)\n self.a = None\n\n # Eccentricty\n self.e = None\n\n # Inclination (radians)\n self.i = None\n\n # Argument of perihelion (radians)\n self.peri = None\n\n # Ascending node (radians)\n self.node = None\n\n # Longitude of perihelion (radians)\n self.pi = None\n\n # Latitude of perihelion (radians)\n self.b = None\n\n # Perihelion distance (AU)\n self.q = None\n\n # Aphelion distance (AU)\n self.Q = None\n\n # True anomaly at the moment of contact with Earth (radians)\n self.true_anomaly = None\n\n # Exxentric anomaly (radians)\n self.eccentric_anomaly = None\n\n # Mean anomaly (radians)\n self.mean_anomaly = None\n\n # Calculate the date and time of the last perihelion passage (datetime object)\n self.last_perihelion = None\n\n # Mean motion in the orbit (rad/day)\n self.n = None\n\n # Tisserand's parameter with respect to Jupiter\n self.Tj = None\n\n # Orbital period\n self.T = None\n\n\n def fixMissingParameters(self):\n \"\"\" Some old orbit files might have missing parameters that were not computed. This function computes\n them.\n \"\"\"\n\n if (not hasattr(self, 'b')) and (self.v_g is not None):\n self.b = calcLatitudeOfPerihelion(self.peri, self.node, self.i)\n\n\n def __repr__(self, uncertainties=None, v_init_ht=None):\n \"\"\" String to be printed out when the Orbit object is printed. \"\"\"\n\n def _uncer(str_format, std_name, multi=1.0, deg=False):\n \"\"\" Internal function. Returns the formatted uncertanty, if the uncertanty is given. If not,\n it returns nothing. \n\n Arguments:\n str_format: [str] String format for the unceertanty.\n std_name: [str] Name of the uncertanty attribute, e.g. if it is 'x', then the uncertanty is \n stored in uncertainties.x.\n \n Keyword arguments:\n multi: [float] Uncertanty multiplier. 1.0 by default. This is used to scale the uncertanty to\n different units (e.g. from m/s to km/s).\n deg: [bool] Converet radians to degrees if True. 
False by defualt.\n \"\"\"\n\n if deg:\n multi *= np.degrees(1.0)\n\n if uncertainties is not None:\n if hasattr(uncertainties, std_name):\n return \" +/- \" + str_format.format(getattr(uncertainties, std_name)*multi)\n\n \n return ''\n\n\n out_str = \"\"\n #out_str += \"--------------------\\n\"\n\n # Check if the orbit was calculated\n if self.ra_g is not None:\n out_str += \" JD dynamic = {:20.12f} \\n\".format(self.jd_dyn)\n out_str += \" LST apparent = {:.10f} deg\\n\".format(np.degrees(self.lst_ref))\n\n\n ### Apparent radiant in ECI ###\n\n out_str += \"Radiant (apparent in ECI which includes Earth's rotation, epoch of date):\\n\"\n out_str += \" R.A. = {:>9.5f}{:s} deg\\n\".format(np.degrees(self.ra), _uncer('{:.4f}', 'ra', \n deg=True))\n out_str += \" Dec = {:>+9.5f}{:s} deg\\n\".format(np.degrees(self.dec), _uncer('{:.4f}', 'dec', \n deg=True))\n out_str += \" Azimuth = {:>9.5f}{:s} deg\\n\".format(np.degrees(self.azimuth_apparent), \\\n _uncer('{:.4f}', 'azimuth_apparent', deg=True))\n out_str += \" Elevation = {:>+9.5f}{:s} deg\\n\".format(np.degrees(self.elevation_apparent), \\\n _uncer('{:.4f}', 'elevation_apparent', deg=True))\n out_str += \" Vavg = {:>9.5f}{:s} km/s\\n\".format(self.v_avg/1000, _uncer('{:.4f}', 'v_avg', \n multi=1.0/1000))\n\n\n if v_init_ht is not None:\n v_init_ht_str = ' (average above {:.2f} km)'.format(v_init_ht)\n else:\n v_init_ht_str = ''\n\n out_str += \" Vinit = {:>9.5f}{:s} km/s{:s}\\n\".format(self.v_init/1000, _uncer('{:.4f}', 'v_init', \n multi=1.0/1000), v_init_ht_str)\n\n\n ### ###\n\n\n ### Apparent radiant in ECEF (no rotation included) ###\n\n out_str += \"Radiant (apparent ground-fixed, epoch of date):\\n\"\n out_str += \" R.A. = {:>9.5f}{:s} deg\\n\".format(np.degrees(self.ra_norot), _uncer('{:.4f}', \\\n 'ra', deg=True))\n out_str += \" Dec = {:>+9.5f}{:s} deg\\n\".format(np.degrees(self.dec_norot), _uncer('{:.4f}', \\\n 'dec', deg=True))\n out_str += \" Azimuth = {:>9.5f}{:s} deg\\n\".format(np.degrees(self.azimuth_apparent_norot), \\\n _uncer('{:.4f}', 'azimuth_apparent', deg=True))\n out_str += \" Elevation = {:>+9.5f}{:s} deg\\n\".format(np.degrees(self.elevation_apparent_norot), \\\n _uncer('{:.4f}', 'elevation_apparent', deg=True))\n out_str += \" Vavg = {:>9.5f}{:s} km/s\\n\".format(self.v_avg_norot/1000, _uncer('{:.4f}', \\\n 'v_avg', multi=1.0/1000))\n out_str += \" Vinit = {:>9.5f}{:s} km/s{:s}\\n\".format(self.v_init_norot/1000, _uncer('{:.4f}', \\\n 'v_init', multi=1.0/1000), v_init_ht_str)\n\n\n\n ### ###\n\n\n # Check if the orbital elements could be calculated, and write them out\n if self.ra_g is not None:\n\n out_str += \"Radiant (geocentric, J2000):\\n\"\n out_str += \" R.A. 
= {:>9.5f}{:s} deg\\n\".format(np.degrees(self.ra_g), _uncer('{:.4f}', 'ra_g', \n deg=True))\n out_str += \" Dec = {:>+9.5f}{:s} deg\\n\".format(np.degrees(self.dec_g), _uncer('{:.4f}', 'dec_g', \n deg=True))\n out_str += \" Vg = {:>9.5f}{:s} km/s\\n\".format(self.v_g/1000, _uncer('{:.4f}', 'v_g', \n multi=1.0/1000))\n out_str += \" Vinf = {:>9.5f}{:s} km/s\\n\".format(self.v_inf/1000, _uncer('{:.4f}', 'v_inf', \n multi=1.0/1000))\n out_str += \" Zc = {:>9.5f}{:s} deg\\n\".format(np.degrees(self.zc), _uncer('{:.4f}', 'zc', \n deg=True))\n out_str += \" Zg = {:>9.5f}{:s} deg\\n\".format(np.degrees(self.zg), _uncer('{:.4f}', 'zg', \n deg=True))\n out_str += \"Radiant (ecliptic geocentric, J2000):\\n\"\n out_str += \" Lg = {:>9.5f}{:s} deg\\n\".format(np.degrees(self.L_g), _uncer('{:.4f}', 'L_g', \n deg=True))\n out_str += \" Bg = {:>+9.5f}{:s} deg\\n\".format(np.degrees(self.B_g), _uncer('{:.4f}', 'B_g', \n deg=True))\n out_str += \" Vh = {:>9.5f}{:s} km/s\\n\".format(self.v_h/1000, _uncer('{:.4f}', 'v_h', \n multi=1/1000.0))\n out_str += \"Radiant (ecliptic heliocentric, J2000):\\n\"\n out_str += \" Lh = {:>9.5f}{:s} deg\\n\".format(np.degrees(self.L_h), _uncer('{:.4f}', 'L_h', \n deg=True))\n out_str += \" Bh = {:>+9.5f}{:s} deg\\n\".format(np.degrees(self.B_h), _uncer('{:.4f}', 'B_h', \n deg=True))\n out_str += \" Vh_x = {:>9.5f}{:s} km/s\\n\".format(self.v_h_x, _uncer('{:.4f}', 'v_h_x'))\n out_str += \" Vh_y = {:>9.5f}{:s} km/s\\n\".format(self.v_h_y, _uncer('{:.4f}', 'v_h_y'))\n out_str += \" Vh_z = {:>9.5f}{:s} km/s\\n\".format(self.v_h_z, _uncer('{:.4f}', 'v_h_z'))\n out_str += \"Orbit:\\n\"\n out_str += \" La Sun = {:>10.6f}{:s} deg\\n\".format(np.degrees(self.la_sun), _uncer('{:.4f}', 'la_sun', \n deg=True))\n out_str += \" a = {:>10.6f}{:s} AU\\n\".format(self.a, _uncer('{:.4f}', 'a'))\n out_str += \" e = {:>10.6f}{:s}\\n\".format(self.e, _uncer('{:.4f}', 'e'))\n out_str += \" i = {:>10.6f}{:s} deg\\n\".format(np.degrees(self.i), _uncer('{:.4f}', 'i', \n deg=True))\n out_str += \" peri = {:>10.6f}{:s} deg\\n\".format(np.degrees(self.peri), _uncer('{:.4f}', 'peri', \n deg=True))\n out_str += \" node = {:>10.6f}{:s} deg\\n\".format(np.degrees(self.node), _uncer('{:.4f}', 'node', \n deg=True))\n out_str += \" Pi = {:>10.6f}{:s} deg\\n\".format(np.degrees(self.pi), _uncer('{:.4f}', 'pi', \n deg=True))\n if hasattr(self, 'b'):\n out_str += \" b = {:>10.6f}{:s} deg\\n\".format(np.degrees(self.b), _uncer('{:.4f}', 'b', \n deg=True))\n out_str += \" q = {:>10.6f}{:s} AU\\n\".format(self.q, _uncer('{:.4f}', 'q'))\n out_str += \" f = {:>10.6f}{:s} deg\\n\".format(np.degrees(self.true_anomaly), _uncer('{:.4f}', \n 'true_anomaly', deg=True))\n out_str += \" M = {:>10.6f}{:s} deg\\n\".format(np.degrees(self.mean_anomaly), _uncer('{:.4f}', \n 'mean_anomaly', deg=True))\n out_str += \" Q = {:>10.6f}{:s} AU\\n\".format(self.Q, _uncer('{:.4f}', 'Q'))\n out_str += \" n = {:>10.6f}{:s} deg/day\\n\".format(np.degrees(self.n), _uncer('{:.4f}', 'n', \n deg=True))\n out_str += \" T = {:>10.6f}{:s} years\\n\".format(self.T, _uncer('{:.4f}', 'T'))\n \n if self.last_perihelion is not None:\n out_str += \" Last perihelion JD = {:.6f} \".format(datetime2JD(self.last_perihelion)) \\\n + \"(\" + str(self.last_perihelion) + \")\" + _uncer('{:.4f} days', 'last_perihelion') \\\n + \"\\n\"\n else:\n out_str += \" Last perihelion JD = NaN \\n\"\n\n out_str += \" Tj = {:>10.6f}{:s}\\n\".format(self.Tj, _uncer('{:.4f}', 'Tj'))\n\n\n out_str += \"Shower association:\\n\"\n\n # Perform shower association\n shower_obj = 
associateShower(self.la_sun, self.L_g, self.B_g, self.v_g)\n if shower_obj is None:\n shower_no = -1\n shower_code = '...'\n else:\n shower_no = shower_obj.IAU_no\n shower_code = shower_obj.IAU_code\n\n out_str += \" IAU No. = {:>4d}\\n\".format(shower_no)\n out_str += \" IAU code = {:>4s}\\n\".format(shower_code)\n\n\n return out_str\n\n\n\ndef calcLatitudeOfPerihelion(peri, node, incl):\n \"\"\" Calculate the latitude of perihelion. \n Source: https://en.wikipedia.org/wiki/Longitude_of_the_periapsis#Derivation_of_ecliptic_longitude_and_latitude_of_perihelion_for_inclined_orbits\n \"\"\"\n\n Ap = np.cos(peri)*np.cos(node) - np.sin(peri)*np.sin(node)*np.cos(incl)\n Bp = np.cos(J2000_OBLIQUITY)*(np.cos(peri)*np.sin(node) + np.sin(peri)*np.cos(node)*np.cos(incl)) \\\n - np.sin(J2000_OBLIQUITY)*np.sin(peri)*np.sin(incl)\n Cp = np.sin(J2000_OBLIQUITY)*(np.cos(peri)*np.sin(node) + np.sin(peri)*np.cos(node)*np.cos(incl)) \\\n + np.cos(J2000_OBLIQUITY)*np.sin(peri)*np.sin(incl)\n\n # RA/Dec of the direction of perihelion\n ra_p = np.arctan2(Bp, Ap)%(2*np.pi)\n dec_p = np.arcsin(Cp)\n \n # Longitue of perihelion\n # pi_t = np.arctan2(np.sin(ra_p)*np.cos(J2000_OBLIQUITY) + np.tan(dec_p)*np.sin(J2000_OBLIQUITY), np.cos(ra_p))%(2*np.pi)\n \n # Latitude of perihelion\n b = np.arcsin(np.sin(dec_p)*np.cos(J2000_OBLIQUITY) - np.cos(dec_p)*np.sin(J2000_OBLIQUITY)*np.sin(ra_p))\n\n return b\n\n\ndef calcOrbit(radiant_eci, v_init, v_avg, eci_ref, jd_ref, stations_fixed=False, reference_init=True, \\\n rotation_correction=False):\n \"\"\" Calculate the meteor's orbit from the given meteor trajectory. The orbit of the meteoroid is defined \n relative to the centre of the Sun (heliocentric).\n\n Arguments:\n radiant_eci: [3 element ndarray] Radiant vector in ECI coordinates (meters).\n v_init: [float] Initial velocity (m/s).\n v_avg: [float] Average velocity of a meteor (m/s).\n eci_ref: [float] reference ECI coordinates in the epoch of date (meters, in the epoch of date) of the \n meteor trajectory. They can be calculated with the geo2Cartesian function. Ceplecha (1987) assumes \n this to the the average point on the trajectory, while Jennsikens et al. (2011) assume this to be \n the first point on the trajectory as that point is not influenced by deceleration.\n NOTE: If the stations are not fixed, the reference ECI coordinates should be the ones\n of the initial point on the trajectory, NOT of the average point!\n jd_ref: [float] reference Julian date of the meteor trajectory. Ceplecha (1987) takes this as the \n average time of the trajectory, while Jenniskens et al. (2011) take this as the the first point\n on the trajectory.\n \n Keyword arguments:\n stations_fixed: [bool] If True, the correction for Earth's rotation will be performed on the radiant,\n but not the velocity. This should be True ONLY in two occasions:\n - if the ECEF coordinate system was used for trajectory estimation\n - if the ECI coordinate system was used for trajectory estimation, BUT the stations were not\n moved in time, but were kept fixed at one point, regardless of the trajectory estimation\n method.\n It is necessary to perform this correction for the intersecting planes method, but not for\n the lines of sight method ONLY when the stations are not fixed. Of course, if one is using the \n lines of sight method with fixed stations, one should perform this correction!\n reference_init: [bool] If True (default), the initial point on the trajectory is given as the reference\n one, i.e. 
the reference ECI coordinates are the ECI coordinates of the initial point on the\n trajectory, where the meteor has the velocity v_init. If False, then the reference point is the\n average point on the trajectory, and the average velocity will be used to do the corrections.\n rotation_correction: [bool] If True, the correction of the initial velocity for Earth's rotation will\n be performed. False by default. This should ONLY be True if the coordiante system for trajectory\n estimation was ECEF, i.e. did not rotate with the Earth. In all other cases it should be False, \n even if fixed station coordinates were used in the ECI coordinate system!\n\n Return:\n orb: [Orbit object] Object containing the calculated orbit.\n\n \"\"\"\n\n\n ### Correct the velocity vector for the Earth's rotation if the stations are fixed ###\n ##########################################################################################################\n\n eci_x, eci_y, eci_z = eci_ref\n\n # Calculate the geocentric latitude (latitude which considers the Earth as an elipsoid) of the reference \n # trajectory point\n lat_geocentric = np.arctan2(eci_z, np.sqrt(eci_x**2 + eci_y**2))\n\n\n # Calculate the dynamical JD\n jd_dyn = jd2DynamicalTimeJD(jd_ref)\n\n # Calculate the geographical coordinates of the reference trajectory ECI position\n lat_ref, lon_ref, ht_ref = cartesian2Geo(jd_ref, *eci_ref)\n\n\n # Initialize a new orbit structure and assign calculated parameters\n orb = Orbit()\n\n\n\n # Calculate the velocity of the Earth rotation at the position of the reference trajectory point (m/s)\n v_e = 2*np.pi*vectMag(eci_ref)*np.cos(lat_geocentric)/86164.09053\n\n \n # Calculate the equatorial coordinates of east from the reference position on the trajectory\n azimuth_east = np.pi/2\n altitude_east = 0\n ra_east, dec_east = altAz2RADec(azimuth_east, altitude_east, jd_ref, lat_ref, lon_ref)\n\n\n # Compute velocity components of the state vector\n if reference_init:\n\n # If the initial velocity was the reference velocity, use it for the correction\n v_ref_vect = v_init*radiant_eci\n\n\n else:\n # Calculate reference velocity vector using the average point on the trajectory and the average\n # velocity\n v_ref_vect = v_avg*radiant_eci\n\n\n\n # Apply the Earth rotation correction if the station coordinates are fixed (a MUST for the \n # intersecting planes method!)\n if stations_fixed:\n\n ### Set fixed stations radiant info ###\n\n # If the stations are fixed, then the input state vector is already fixed to the ground\n orb.ra_norot, orb.dec_norot = eci2RaDec(radiant_eci)\n\n # Apparent azimuth and altitude (no rotation)\n orb.azimuth_apparent_norot, orb.elevation_apparent_norot = raDec2AltAz(orb.ra_norot, orb.dec_norot, \\\n jd_ref, lat_ref, lon_ref)\n\n # Estimated average velocity (no rotation)\n orb.v_avg_norot = v_avg\n\n # Estimated initial velocity (no rotation)\n orb.v_init_norot = v_init\n\n ### ###\n\n\n v_ref_corr = np.zeros(3)\n\n # Calculate the corrected reference velocity vector/radiant\n v_ref_corr[0] = v_ref_vect[0] - v_e*np.cos(ra_east)\n v_ref_corr[1] = v_ref_vect[1] - v_e*np.sin(ra_east)\n v_ref_corr[2] = v_ref_vect[2]\n\n\n\n else:\n\n # MOVING STATIONS\n # Velocity vector will remain unchanged if the stations were moving\n if reference_init:\n v_ref_corr = v_init*radiant_eci\n\n else:\n v_ref_corr = v_avg*radiant_eci\n\n\n\n ### ###\n # If the rotation correction does not have to be applied, meaning that the rotation is already\n # included, compute a version of the radiant and the velocity 
without Earth's rotation\n # (REPORTING PURPOSES ONLY, THESE VALUES ARE NOT USED IN THE CALCULATION)\n\n v_ref_nocorr = np.zeros(3)\n\n # Calculate the derotated reference velocity vector/radiant\n v_ref_nocorr[0] = v_ref_vect[0] + v_e*np.cos(ra_east)\n v_ref_nocorr[1] = v_ref_vect[1] + v_e*np.sin(ra_east)\n v_ref_nocorr[2] = v_ref_vect[2]\n\n # Compute the radiant without Earth's rotation included\n orb.ra_norot, orb.dec_norot = eci2RaDec(vectNorm(v_ref_nocorr))\n orb.azimuth_apparent_norot, orb.elevation_apparent_norot = raDec2AltAz(orb.ra_norot, orb.dec_norot, \\\n jd_ref, lat_ref, lon_ref)\n orb.v_init_norot = vectMag(v_ref_nocorr)\n orb.v_avg_norot = orb.v_init_norot - v_init + v_avg\n\n ### ###\n\n\n \n\n ##########################################################################################################\n\n\n\n ### Correct velocity for Earth's gravity ###\n ##########################################################################################################\n\n # If the reference velocity is the initial velocity\n if reference_init:\n\n # Use the corrected velocity for Earth's rotation (when ECEF coordinates are used)\n if rotation_correction:\n v_init_corr = vectMag(v_ref_corr)\n\n else:\n # IMPORTANT NOTE: The correction in this case is only done on the radiant (even if the stations \n # were fixed, but NOT on the initial velocity!). Thus, correction from Ceplecha 1987, \n # equation (35) is not needed. If the initial velocity is determined from time vs. length and in \n # ECI coordinates, whose coordinates rotate with the Earth, the moving stations play no role in \n # biasing the velocity.\n v_init_corr = v_init\n\n else:\n\n if rotation_correction:\n\n # Calculate the corrected initial velocity if the reference velocity is the average velocity\n v_init_corr = vectMag(v_ref_corr) + v_init - v_avg\n \n\n else:\n v_init_corr = v_init\n\n\n\n # Calculate apparent RA and Dec from radiant state vector\n orb.ra, orb.dec = eci2RaDec(radiant_eci)\n orb.v_init = v_init\n orb.v_avg = v_avg\n\n # Calculate the apparent azimuth and altitude (geodetic latitude, because ra/dec are calculated from ECI,\n # which is calculated from WGS84 coordinates)\n orb.azimuth_apparent, orb.elevation_apparent = raDec2AltAz(orb.ra, orb.dec, jd_ref, lat_ref, lon_ref)\n\n orb.jd_ref = jd_ref\n orb.lon_ref = lon_ref\n orb.lat_ref = lat_ref\n orb.ht_ref = ht_ref\n orb.lat_geocentric = lat_geocentric\n\n # Assume that the velocity in infinity is the same as the initial velocity (after rotation correction, if\n # it was needed)\n orb.v_inf = v_init_corr\n\n\n # Make sure the velocity of the meteor is larger than the escape velocity\n if v_init_corr**2 > (2*6.67408*5.9722)*1e13/vectMag(eci_ref):\n\n # Calculate the geocentric velocity (sqrt of squared inital velocity minus the square of the Earth escape \n # velocity at the height of the trajectory), units are m/s.\n # Square of the escape velocity is: 2GM/r, where G is the 2014 CODATA-recommended value of \n # 6.67408e-11 m^3/(kg s^2), and the mass of the Earth is M = 5.9722e24 kg\n v_g = np.sqrt(v_init_corr**2 - (2*6.67408*5.9722)*1e13/vectMag(eci_ref))\n\n\n # Calculate the radiant corrected for Earth's rotation (ONLY if the stations were fixed, otherwise it\n # is the same as the apparent radiant)\n ra_corr, dec_corr = eci2RaDec(vectNorm(v_ref_corr))\n\n # Calculate the Local Sidreal Time of the reference trajectory position\n lst_ref = np.radians(jd2LST(jd_ref, np.degrees(lon_ref))[0])\n\n # Calculate the apparent zenith angle\n zc = 
np.arccos(np.sin(dec_corr)*np.sin(lat_geocentric) \\\n + np.cos(dec_corr)*np.cos(lat_geocentric)*np.cos(lst_ref - ra_corr))\n\n # Calculate the zenith attraction correction\n delta_zc = 2*np.arctan2((v_init_corr - v_g)*np.tan(zc/2), v_init_corr + v_g)\n\n # Zenith distance of the geocentric radiant\n zg = zc + np.abs(delta_zc)\n\n ##########################################################################################################\n\n\n\n ### Calculate the geocentric radiant ###\n ##########################################################################################################\n\n # Get the azimuth from the corrected RA and Dec\n azimuth_corr, _ = raDec2AltAz(ra_corr, dec_corr, jd_ref, lat_geocentric, lon_ref)\n\n # Calculate the geocentric radiant\n ra_g, dec_g = altAz2RADec(azimuth_corr, np.pi/2 - zg, jd_ref, lat_geocentric, lon_ref)\n \n\n ### Precess ECI coordinates to J2000 ###\n\n # Convert rectangular to spherical coordiantes\n re, delta_e, alpha_e = cartesianToSpherical(*eci_ref)\n\n # Precess coordinates to J2000\n alpha_ej, delta_ej = equatorialCoordPrecession(jd_ref, J2000_JD.days, alpha_e, delta_e)\n\n # Convert coordinates back to rectangular\n eci_ref = sphericalToCartesian(re, delta_ej, alpha_ej)\n eci_ref = np.array(eci_ref)\n\n ######\n\n # Precess the geocentric radiant to J2000\n ra_g, dec_g = equatorialCoordPrecession(jd_ref, J2000_JD.days, ra_g, dec_g)\n\n\n # Calculate the ecliptic latitude and longitude of the geocentric radiant (J2000 epoch)\n L_g, B_g = raDec2Ecliptic(J2000_JD.days, ra_g, dec_g)\n\n\n # Load the JPL ephemerids data\n jpl_ephem_data = SPK.open(config.jpl_ephem_file)\n \n # Get the position of the Earth (km) and its velocity (km/s) at the given Julian date (J2000 epoch)\n # The position is given in the ecliptic coordinates, origin of the coordinate system is in the centre\n # of the Sun\n earth_pos, earth_vel = calcEarthRectangularCoordJPL(jd_dyn, jpl_ephem_data, sun_centre_origin=True)\n\n # print('Earth position:')\n # print(earth_pos)\n # print('Earth velocity:')\n # print(earth_vel)\n\n # Convert the Earth's position to rectangular equatorial coordinates (FK5)\n earth_pos_eq = rotateVector(earth_pos, np.array([1, 0, 0]), J2000_OBLIQUITY)\n\n # print('Earth position (FK5):')\n # print(earth_pos_eq)\n\n # print('Meteor ECI:')\n # print(eci_ref)\n\n # Add the position of the meteor's trajectory to the position of the Earth to calculate the \n # equatorial coordinates of the meteor (in kilometers)\n meteor_pos = earth_pos_eq + eci_ref/1000\n\n\n # print('Meteor position (FK5):')\n # print(meteor_pos)\n\n # Convert the position of the trajectory from FK5 to heliocentric ecliptic coordinates\n meteor_pos = rotateVector(meteor_pos, np.array([1, 0, 0]), -J2000_OBLIQUITY)\n\n # print('Meteor position:')\n # print(meteor_pos)\n\n\n ##########################################################################################################\n\n # Calculate components of the heliocentric velocity of the meteor (km/s)\n v_h = np.array(earth_vel) + np.array(eclipticToRectangularVelocityVect(L_g, B_g, v_g/1000))\n\n # Calculate the heliocentric velocity in km/s\n v_h_mag = vectMag(v_h)\n\n\n # Calculate the heliocentric ecliptic coordinates of the meteoroid using the method of \n # Sato and Watanabe (2014).\n L_h, B_h, met_v_h = correctedEclipticCoord(L_g, B_g, v_g/1000, earth_vel)\n\n\n # Calculate the solar longitude\n la_sun = jd2SolLonJPL(jd_dyn)\n\n\n # Calculations below done using Dave Clark's Master thesis equations\n\n # Specific orbital 
energy\n epsilon = (vectMag(v_h)**2)/2 - SUN_MU/vectMag(meteor_pos)\n\n # Semi-major axis in AU\n a = -SUN_MU/(2*epsilon*AU)\n\n # Calculate mean motion in rad/day\n n = np.sqrt(G*SUN_MASS/((np.abs(a)*AU*1000.0)**3))*86400.0\n\n\n # Calculate the orbital period in years\n # avoid floating point error if orbit is hyperbolic\n if a > 0: \n T = 2*np.pi*np.sqrt(((a*AU)**3)/SUN_MU)/(86400*SIDEREAL_YEAR)\n else:\n T = np.nan\n\n # Calculate the orbit angular momentum\n h_vect = np.cross(meteor_pos, v_h)\n \n # Calculate inclination\n incl = np.arccos(h_vect[2]/vectMag(h_vect))\n\n\n # Calculate eccentricity\n e_vect = np.cross(v_h, h_vect)/SUN_MU - vectNorm(meteor_pos)\n eccentricity = vectMag(e_vect)\n\n\n # Calculate perihelion distance (source: Jenniskens et al., 2011, CAMS overview paper)\n if eccentricity == 1:\n q = (vectMag(meteor_pos) + np.dot(e_vect, meteor_pos))/(1 + vectMag(e_vect))\n else:\n q = a*(1.0 - eccentricity)\n\n # Calculate the aphelion distance\n Q = a*(1.0 + eccentricity)\n\n\n # Normal vector to the XY reference frame\n k_vect = np.array([0, 0, 1])\n\n # Vector from the Sun pointing to the ascending node\n n_vect = np.cross(k_vect, h_vect)\n\n # Calculate node\n if vectMag(n_vect) == 0:\n node = 0\n else:\n node = np.arctan2(n_vect[1], n_vect[0])\n\n node = node%(2*np.pi)\n\n\n # Calculate argument of perihelion\n if vectMag(n_vect) != 0:\n peri = np.arccos(np.dot(n_vect, e_vect)/(vectMag(n_vect)*vectMag(e_vect)))\n\n if e_vect[2] < 0:\n peri = 2*np.pi - peri\n\n else:\n peri = np.arccos(e_vect[0]/vectMag(e_vect))\n\n peri = peri%(2*np.pi)\n\n\n\n # Calculate the longitude of perihelion\n pi = (node + peri)%(2*np.pi)\n\n # Calculate the latitude of perihelion\n b = calcLatitudeOfPerihelion(peri, node, incl)\n\n\n ### Calculate true anomaly\n true_anomaly = np.arccos(np.dot(e_vect, meteor_pos)/(vectMag(e_vect)*vectMag(meteor_pos)))\n if np.dot(meteor_pos, v_h) < 0:\n true_anomaly = 2*np.pi - true_anomaly\n\n true_anomaly = true_anomaly%(2*np.pi)\n\n ###\n\n\n # Calculate eccentric anomaly\n # not meaningful for eccentricity > 1\n if eccentricity < 1: \n eccentric_anomaly = np.arctan2(np.sqrt(1 - eccentricity**2)*np.sin(true_anomaly), eccentricity \\\n + np.cos(true_anomaly))\n\n # Calculate mean anomaly\n mean_anomaly = eccentric_anomaly - eccentricity * np.sin(eccentric_anomaly)\n mean_anomaly = mean_anomaly % (2 * np.pi)\n else:\n eccentric_anomaly = np.nan\n mean_anomaly = np.nan\n\n # Calculate the time in days since the last perihelion passage of the meteoroid\n # not meaningful for non-closed orbits\n if a > 0:\n dt_perihelion = (mean_anomaly*a**(3.0/2))/0.01720209895\n else:\n dt_perihelion = np.nan\n\n if not np.isnan(dt_perihelion):\n \n # Calculate the date and time of the last perihelion passage\n last_perihelion = jd2Date(jd_dyn - dt_perihelion, dt_obj=True)\n\n else:\n last_perihelion = None\n\n\n # Calculate Tisserand's parameter with respect to Jupiter\n Tj = 2*np.sqrt((1 - eccentricity**2)*a/5.204267)*np.cos(incl) + 5.204267/a\n\n\n\n # Assign calculated parameters\n orb.lst_ref = lst_ref\n orb.jd_dyn = jd_dyn\n orb.v_g = v_g\n orb.ra_g = ra_g\n orb.dec_g = dec_g\n\n orb.meteor_pos = meteor_pos\n orb.L_g = L_g\n orb.B_g = B_g\n\n orb.v_h_x, orb.v_h_y, orb.v_h_z = met_v_h\n orb.L_h = L_h\n orb.B_h = B_h\n\n orb.zc = zc\n orb.zg = zg\n\n orb.v_h = v_h_mag*1000\n\n orb.la_sun = la_sun\n\n orb.a = a\n orb.e = eccentricity\n orb.i = incl\n orb.peri = peri\n orb.node = node\n orb.pi = pi\n orb.b = b\n orb.q = q\n orb.Q = Q\n orb.true_anomaly = true_anomaly\n 
orb.eccentric_anomaly = eccentric_anomaly\n orb.mean_anomaly = mean_anomaly\n orb.last_perihelion = last_perihelion\n orb.n = n\n orb.T = T\n\n orb.Tj = Tj\n\n\n return orb\n\n\n\n\nif __name__ == \"__main__\":\n\n from wmpl.Utils.TrajConversions import raDec2ECI\n\n ### COMMAND LINE ARGUMENTS\n\n # Init the command line arguments parser\n arg_parser = argparse.ArgumentParser(description=\"\"\" Compute the orbit from given trajectory parameters, or recompute the orbit using the given trajectory pickle file and a few modified trajectory values.\n Usage:\n\n a) Recomputing an orbit using an existing trajectory, but modifying one one of the trajectory parameters, e.g. with the initial velocity of 20.5 km/s:\n python -m wmpl.Trajectory.Orbit trajectory.pickle -v 20.5\n\n b) Compute the orbit from scratch:\n python -m wmpl.Trajectory.Orbit -r 317.74 -d 31.72 -v 54.9 -t \"20180614-072809.3\" -a 44.43 -o -81.56 -e 105.8\n\n c) If the apparent radiant was given in J2000, use the --j2000 option.\n \"\"\",\n formatter_class=argparse.RawTextHelpFormatter)\n\n arg_parser.add_argument('pickle_file', type=str, nargs='?', help='Path to the trajectory pickle file.')\n\n arg_parser.add_argument('-r', '--ra', help='Custom right ascention of the apparent radiant (deg) in the epoch of date (use option --j2000 to use the J2000 epoch).', type=float, \\\n default=None)\n\n arg_parser.add_argument('-d', '--dec', help='Custom declination of the apparent radiant (deg) in the epoch of date (use option --j2000 to use the J2000 epoch).', type=float, \\\n default=None)\n\n arg_parser.add_argument('-v', '--vinit', help='Custom initial velocity in km/s.', type=float, \\\n default=None)\n\n arg_parser.add_argument('-w', '--vavg', help='Custom average velocity in km/s.', type=float, \\\n default=None)\n\n arg_parser.add_argument('-t', '--time', help='Reference UTC date and time for which the relative time of the meteor is t = 0. Format: YYYYMMDD-HHMMSS.uuu', \\\n type=str, default=None)\n\n arg_parser.add_argument('-a', '--lat', help='Latitude +N of the reference position on the trajectory (deg).', \\\n type=float, default=None)\n\n arg_parser.add_argument('-o', '--lon', help='Longitude +E of the reference position on the trajectory (deg).', \\\n type=float, default=None)\n\n arg_parser.add_argument('-e', '--ele', help='Height of the reference position on the trajectory (km).', \\\n type=float, default=None)\n\n arg_parser.add_argument('-j', '--j2000', \\\n help=\"Give the radiant in J2000.\", \\\n action=\"store_true\")\n\n arg_parser.add_argument('-k', '--refavg', \\\n help=\"The average position on the trajectory is used as a reference position instead of the initial position (e.g. with MILIG). The correction for Earth's rotation will be applied.\", \\\n action=\"store_true\")\n\n arg_parser.add_argument('-c', '--vrotcorr', \\\n help=\"Correct the magnitude of the velocity due to the Earth's rotation.\", \\\n action=\"store_true\")\n\n arg_parser.add_argument('-s', '--statfixed', \\\n help=\"Shoud be used if the stations were fixed during trajectory estimation (e.g. with MILIG).\", \\\n action=\"store_true\")\n\n arg_parser.add_argument('-m', '--milig', \\\n help=\"MILIG input mode, i.e. the trajectory was estimated with fixed stations and reference average position on the trajectory. 
This replaces calling both options --refavg and --statfixed.\", \\\n action=\"store_true\")\n\n\n # Parse the command line arguments\n cml_args = arg_parser.parse_args()\n\n ############################\n\n\n # Load the pickle file, if given\n if cml_args.pickle_file is not None:\n traj = loadPickle(*os.path.split(cml_args.pickle_file))\n\n else:\n traj = None\n\n\n\n parameter_missing_message = \"To compute the orbit without the existing trajectory file, {:s} must also be provided!\"\n\n if cml_args.ra is not None:\n ra = np.radians(cml_args.ra)\n elif traj is not None:\n ra = traj.orbit.ra\n else:\n print(parameter_missing_message.format('RA'))\n sys.exit()\n\n if cml_args.dec is not None:\n dec = np.radians(cml_args.dec)\n elif traj is not None:\n dec = traj.orbit.dec\n else:\n print(parameter_missing_message.format('Dec'))\n sys.exit()\n\n if cml_args.vinit is not None:\n v_init = 1000*cml_args.vinit\n elif traj is not None:\n v_init = traj.orbit.v_init\n else:\n print(parameter_missing_message.format('initial velocity'))\n sys.exit()\n\n if cml_args.vavg is not None:\n v_avg = 1000*cml_args.vavg\n elif traj is not None:\n v_avg = traj.orbit.v_avg\n elif v_init is not None:\n v_avg = v_init\n else:\n print(parameter_missing_message.format('average velocity'))\n sys.exit()\n\n if cml_args.time is not None:\n dt_ref = datetime.datetime.strptime(cml_args.time, \"%Y%m%d-%H%M%S.%f\")\n jd_ref = datetime2JD(dt_ref)\n elif traj is not None:\n jd_ref = traj.orbit.jd_ref\n else:\n print(parameter_missing_message.format('reference time'))\n sys.exit()\n\n\n # Parse reference location\n if (cml_args.lat is None) and (cml_args.lon is None) and (cml_args.ele is None):\n\n # Reuse the ECI coordinates from the given trajectory file\n if traj is not None:\n eci_ref = traj.state_vect_mini\n\n else:\n print(parameter_missing_message.format('lat, lon, ht'))\n sys.exit()\n\n\n else:\n\n # Parse individual location parameters\n if cml_args.lat is not None:\n lat_ref = np.radians(cml_args.lat)\n elif traj is not None:\n lat_ref = traj.orbit.lat_ref\n else:\n print(parameter_missing_message.format('latitude'))\n sys.exit()\n\n if cml_args.lon is not None:\n lon_ref = np.radians(cml_args.lon)\n elif traj is not None:\n lon_ref = traj.orbit.lon_ref\n else:\n print(parameter_missing_message.format('longitude'))\n sys.exit()\n\n if cml_args.ele is not None:\n ht_ref = 1000*cml_args.ele\n elif traj is not None:\n ht_ref = traj.orbit.ht_ref\n else:\n print(parameter_missing_message.format('height'))\n sys.exit()\n\n\n # Compute the ECI coordinates of the reference point on the trajectory\n eci_ref = geo2Cartesian(lat_ref, lon_ref, ht_ref, jd_ref)\n\n\n\n # Presess to epoch of date if given in J2000\n if cml_args.j2000:\n ra, dec = equatorialCoordPrecession(J2000_JD.days, jd_ref, ra, dec)\n\n # Compute the radiant vector in ECI coordinates\n radiant_eci = np.array(raDec2ECI(ra, dec))\n\n\n # Set the right flags\n reference_init = (not cml_args.refavg) and (not cml_args.milig)\n rotation_correction = cml_args.vrotcorr or cml_args.milig #or cml_args.statfixed\n stations_fixed = cml_args.statfixed or cml_args.milig\n\n\n # # Test values\n # radiant_eci = np.array(raDec2ECI(np.radians(265.16047), np.radians(-18.84373)))\n # v_init = 16424.81\n # v_avg = 15768.71\n # eci_ref = np.array([3757410.98, -2762153.20, 4463901.73])\n # jd_ref = 2457955.794670294970\n\n # Compute the orbit\n orb = calcOrbit(radiant_eci, v_init, v_avg, eci_ref, jd_ref, reference_init=reference_init, \\\n rotation_correction=rotation_correction, 
stations_fixed=stations_fixed)\n\n # Print the results\n print('Ref JD:', jd_ref)\n print('ECI ref:', *eci_ref)\n\n print(orb)\n\n" ]
[ [ "numpy.degrees", "numpy.arctan2", "numpy.arcsin", "numpy.zeros", "numpy.cross", "numpy.cos", "numpy.abs", "numpy.tan", "numpy.isnan", "numpy.sqrt", "numpy.sin", "numpy.dot", "numpy.array", "numpy.radians" ] ]
vervacity/ggr-project
[ "7a9155c5fb573f7b877c63390f8052fcda5b6f6e" ]
[ "ggr/analyses/filtering.py" ]
[ "\"\"\"Contains functions for filtering\n\"\"\"\n\nimport math\nimport pandas as pd\n\n\ndef remove_media_timepoints(in_mat_file, args, out_mat_file):\n \"\"\"Takes an input matrix and removes specific timepoints\n \"\"\"\n media_timepoints = args.misc[\"media_timepoints\"]\n data = pd.read_table(in_mat_file)\n keep_columns = [column for column in data.columns\n if column.split('_')[0] not in media_timepoints]\n data_filt = data.filter(items=keep_columns)\n data_filt.to_csv(out_mat_file, sep='\\t', compression=\"gzip\")\n\n return None\n\n\ndef filter_for_ids(mat_file, keep_ids_file, gz_out_file, opposite=False, counts=False):\n \"\"\"Given a list of ids, filter the matrix file to keep\n only those ids. First column must be ids.\n \"\"\"\n data_df = pd.read_csv(mat_file, sep='\\t', header=0, index_col=0)\n keep_ids = pd.read_csv(keep_ids_file, header=None)\n if opposite:\n \tkeep_data = data_df.loc[~data_df.index.isin(keep_ids[0])]\n else:\n \tkeep_data = data_df.loc[data_df.index.isin(keep_ids[0])]\n if counts:\n keep_data = keep_data.applymap(int)\n keep_data.to_csv(gz_out_file, sep='\\t', compression='gzip')\n\n return None\n\n\ndef remove_mat_columns(mat_file, columns, out_mat_file, remove_zero_rows=True):\n \"\"\"Given a mat file and columns, \n remove these columns from the matrix and return\n \"\"\"\n assert out_mat_file.endswith(\".gz\")\n data = pd.read_table(mat_file, header=0, index_col=0)\n data = data.drop(labels=columns, axis=1)\n\n # also remove rows that are now zero because of dropped columns\n if remove_zero_rows:\n data = data.loc[~(data==0).all(axis=1)]\n\n data.to_csv(out_mat_file, compression=\"gzip\", sep=\"\\t\")\n\n return None\n\n\ndef get_ordered_subsample(in_file, out_file, out_nrow=2000):\n \"\"\"Given an input text file, grab an ordered sample\n \"\"\"\n num_lines = 0\n with open(in_file, \"r\") as fp:\n for line in fp:\n num_lines += 1\n\n skip = math.ceil(float(num_lines) / out_nrow)\n\n num_lines = 0\n with open(out_file, \"w\") as out:\n with open(in_file, \"r\") as fp:\n for line in fp:\n if num_lines % skip == 0:\n out.write(line)\n num_lines += 1\n \n return None\n\n\ndef sort_by_clusters(\n cluster_files,\n out_clusters_file,\n out_list_file):\n \"\"\"Given (cluster_file, cluster_column) in order,\n bring together and sort according to order\n \"\"\"\n # pull first file as initial\n cluster_file, cluster_cols = cluster_files[0]\n data = pd.read_table(cluster_file)\n sort_columns = cluster_cols\n #data = data[[\"id\", cluster_col]]\n \n # read in the rest\n for cluster_file, cluster_cols in cluster_files[1:]:\n cluster_data = pd.read_table(cluster_file)\n #cluster_data = cluster_data[[\"id\", cluster_col]]\n data = data.merge(cluster_data, on=\"id\")\n sort_columns += cluster_cols\n \n # sort and save out. shuffle first to spread more evenly\n data = data.sample(frac=1.0, random_state=42)\n data_sorted = data.sort_values(sort_columns, ascending=True)\n data_sorted.to_csv(out_clusters_file, sep=\"\\t\", index=False)\n data_sorted.to_csv(out_list_file, columns=[\"id\"], compression=\"gzip\", sep=\"\\t\",\n index=False, header=False)\n\n return None\n" ]
[ [ "pandas.read_table", "pandas.read_csv" ] ]
graphnj/mmdetection
[ "a53cc3766cf2bf54a28392212d07cff4486f6bb3" ]
[ "mmdet/core/export/pytorch2onnx.py" ]
[ "from functools import partial\n\nimport mmcv\nimport numpy as np\nimport torch\nfrom mmcv.runner import load_checkpoint\n\n\ndef generate_inputs_and_wrap_model(config_path, checkpoint_path, input_config):\n \"\"\"Prepare sample input and wrap model for ONNX export.\n\n The ONNX export API only accept args, and all inputs should be\n torch.Tensor or corresponding types (such as tuple of tensor).\n So we should call this function before exporting. This function will:\n\n 1. generate corresponding inputs which are used to execute the model.\n 2. Wrap the model's forward function.\n\n For example, the MMDet models' forward function has a parameter\n ``return_loss:bool``. As we want to set it as False while export API\n supports neither bool type or kwargs. So we have to replace the forward\n like: ``model.forward = partial(model.forward, return_loss=False)``\n\n Args:\n config_path (str): the OpenMMLab config for the model we want to\n export to ONNX\n checkpoint_path (str): Path to the corresponding checkpoint\n input_config (dict): the exactly data in this dict depends on the\n framework. For MMSeg, we can just declare the input shape,\n and generate the dummy data accordingly. However, for MMDet,\n we may pass the real img path, or the NMS will return None\n as there is no legal bbox.\n\n Returns:\n tuple: (model, tensor_data) wrapped model which can be called by \\\n model(*tensor_data) and a list of inputs which are used to execute \\\n the model while exporting.\n \"\"\"\n\n model = build_model_from_cfg(config_path, checkpoint_path)\n one_img, one_meta = preprocess_example_input(input_config)\n tensor_data = [one_img]\n model.forward = partial(\n model.forward, img_metas=[[one_meta]], return_loss=False)\n\n # pytorch has some bug in pytorch1.3, we have to fix it\n # by replacing these existing op\n opset_version = 11\n # put the import within the function thus it will not cause import error\n # when not using this function\n try:\n from mmcv.onnx.symbolic import register_extra_symbolics\n except ModuleNotFoundError:\n raise NotImplementedError('please update mmcv to version>=v1.0.4')\n register_extra_symbolics(opset_version)\n\n return model, tensor_data\n\n\ndef build_model_from_cfg(config_path, checkpoint_path):\n \"\"\"Build a model from config and load the given checkpoint.\n\n Args:\n config_path (str): the OpenMMLab config for the model we want to\n export to ONNX\n checkpoint_path (str): Path to the corresponding checkpoint\n\n Returns:\n torch.nn.Module: the built model\n \"\"\"\n from mmdet.models import build_detector\n\n cfg = mmcv.Config.fromfile(config_path)\n # import modules from string list.\n if cfg.get('custom_imports', None):\n from mmcv.utils import import_modules_from_strings\n import_modules_from_strings(**cfg['custom_imports'])\n cfg.model.pretrained = None\n cfg.data.test.test_mode = True\n\n # build the model\n model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)\n load_checkpoint(model, checkpoint_path, map_location='cpu')\n model.cpu().eval()\n return model\n\n\ndef preprocess_example_input(input_config):\n \"\"\"Prepare an example input image for ``generate_inputs_and_wrap_model``.\n\n Args:\n input_config (dict): customized config describing the example input.\n\n Returns:\n tuple: (one_img, one_meta), tensor of the example input image and \\\n meta information for the example input image.\n\n Examples:\n >>> from mmdet.core.export import preprocess_example_input\n >>> input_config = {\n >>> 'input_shape': (1,3,224,224),\n >>> 
'input_path': 'demo/demo.jpg',\n >>> 'normalize_cfg': {\n >>> 'mean': (123.675, 116.28, 103.53),\n >>> 'std': (58.395, 57.12, 57.375)\n >>> }\n >>> }\n >>> one_img, one_meta = preprocess_example_input(input_config)\n >>> print(one_img.shape)\n torch.Size([1, 3, 224, 224])\n >>> print(one_meta)\n {'img_shape': (224, 224, 3),\n 'ori_shape': (224, 224, 3),\n 'pad_shape': (224, 224, 3),\n 'filename': '<demo>.png',\n 'scale_factor': 1.0,\n 'flip': False}\n \"\"\"\n input_path = input_config['input_path']\n input_shape = input_config['input_shape']\n one_img = mmcv.imread(input_path)\n if 'normalize_cfg' in input_config.keys():\n normalize_cfg = input_config['normalize_cfg']\n mean = np.array(normalize_cfg['mean'], dtype=np.float32)\n std = np.array(normalize_cfg['std'], dtype=np.float32)\n one_img = mmcv.imnormalize(one_img, mean, std)\n one_img = mmcv.imresize(one_img, input_shape[2:][::-1]).transpose(2, 0, 1)\n one_img = torch.from_numpy(one_img).unsqueeze(0).float().requires_grad_(\n True)\n (_, C, H, W) = input_shape\n one_meta = {\n 'img_shape': (H, W, C),\n 'ori_shape': (H, W, C),\n 'pad_shape': (H, W, C),\n 'filename': '<demo>.png',\n 'scale_factor': 1.0,\n 'flip': False\n }\n\n return one_img, one_meta\n" ]
[ [ "numpy.array", "torch.from_numpy" ] ]
modi712/Computer-Vision
[ "a34d3d73f883beae812c50b879f4dc8ef679b3ac" ]
[ "src/projectSift.py" ]
[ "# Single projection original code\n\nimport argparse\nimport cv2\nimport numpy as np\nimport math\nimport os\nfrom objloader_simple import *\n\n# PARAMETERS\nTHRESHOLD = 10\t# min number of matches to be recognized\n# 105 for mark4, 65 - mark2\n# for sift: 10 for mark4\nSIZE = 3\t\t# size for the display obj\n# 3 for rat,fox, 1 for wolf, 100 for Rixa\nranthresh = 5.0\t#5.0\n#SIFT\nsig = 2\nloweRatio = 0.55\t# 0.55 criteria for selectionf of features\nbestMatchNumber = 2\t#no of matches for points 2 for lowe ratio test\n#PATHS\nref ='reference/mark2.jpg'\nmod ='models/rat.obj'\n\n#This functions loads the target surface image,\ndef main():\n\n homo = None\n l= None\n\n# camera_parameters = np.array([[800, 0, 320], [0, 800, 240], [0, 0, 1]])\n camera_parameters = np.array([[824.05762458, 0, 381.10745975],[0, 839.01299642, 134.22842609],[0, 0, 1]])\n # create ORB/SIFT keypoint detector\n# orb = cv2.ORB_create()\n sift = cv2.xfeatures2d.SIFT_create(sigma=sig)#<>sigma\n \n # create BFMatcher object based on hamming distance\n bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n # load the reference surface that will be searched in the video stream\n dir_name = os.getcwd()\n model = cv2.imread(os.path.join(dir_name, ref), 0)\n # Compute model keypoints and its descriptors\n# kp_model, des_model = orb.detectAndCompute(model, None)\n kp_model,des_model = sift.detectAndCompute(model,None)\n kp_modelKP = kp_model\n kp_model = np.float32([k.pt for k in kp_model])\n # Load 3D model from OBJ file\n obj = OBJ(os.path.join(dir_name, mod ), swapyz=True)\n # init video capture\n cap = cv2.VideoCapture(0)\n\n while True:\n # read the current frame\n ret, frame = cap.read()\n if not ret:\n print (\"Unable to capture video\")\n return\n # find and draw the keypoints of the frame\n #orb\n# kp_frame, des_frame = orb.detectAndCompute(frame, None)\n #sift\n kp_frame,des_frame = sift.detectAndCompute(frame,None)\n kp_frameKP = kp_frame\n kp_frame = np.float32([k.pt for k in kp_frame])\n # match frame descriptors with model descriptors\n try:\n# \tmatches = bf.match(des_model, des_frame)\n \tmatches = matcher(kp_model,kp_frame,des_model,des_frame)\n except:\n \tprint(\"Too Dark\")\n \tcap.release()\n \treturn 0\n\n # sort them in the order of their distance\n # the lower the distance, the better the match\n# matches = sorted(matches, key=lambda x: x.distance)\n\n # compute Homography if enough matches are found\n if len(matches) > THRESHOLD:\n # differenciate between source points and destination points\n print( \"Enough matches found - %d/%d\" % (len(matches), THRESHOLD) )\n #orb\n# src_pts = np.float32([kp_model[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)\n# dst_pts = np.float32([kp_frame[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)\n# sift\n src_pts = np.float32([kp_model[i] for (_, i) in matches])\n dst_pts = np.float32([kp_frame[i] for (i, _) in matches])\n # compute Homography\n homo, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, ranthresh)\n \n # Draw a rectangle that marks the found model in the frame\n if args.rectangle:\n h, w = model.shape\n pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)\n # project corners into frame\n dst = cv2.perspectiveTransform(pts, homo)\n # connect them with lines\n frame = cv2.polylines(frame, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)\n \n # if a valid homography matrix was found render cube on model plane\n if (homo is not None )and (not args.model):\n try:\n # obtain 3D projection matrix from homography 
matrix and camera parameters\n (projection,l) = projection_matrix(camera_parameters, homo)\n # project cube or model\n frame = render(frame, obj, projection, model, False)\n #frame = render(frame, model, projection)\n except:\n pass\n \n # print pose of camera\n if args.pose:\n \tprint('Pose of camera')\n \tprint(l) \n # draw first 10 matches.\n# if args.matches:\n# frame = cv2.drawMatches(model, kp_model, frame, kp_frame, matches[:10], 0, flags=2)\n# frame = cv2.drawMatches(model, kp_modelKP, frame, kp_frameKP, matches[:10], 0, flags=2)\n\n # show result\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n else:\n print( \"Not enough matches found - %d/%d\" % (len(matches), THRESHOLD) )\n # draw first 10 matches.\n# if args.matches:\n# frame = cv2.drawMatches(model, kp_model, frame, kp_frame, matches[:10], 0, flags=2)\n# frame = cv2.drawMatches(model, kp_modelKP, frame, kp_frameKP, matches[:10], 0, flags=2)\n # show result\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n cap.release()\n cv2.destroyAllWindows()\n return 0\n#---END Main---#\n\ndef matcher(kp1,kp2,features1,features2):\n\tmatcher = cv2.DescriptorMatcher_create(\"BruteForce\")\n\trawMatches = matcher.knnMatch(features1, features2, bestMatchNumber)\n\t# keeping only good matches wrt to lowe ratio ## check\n\tmatches=[]\n\tfor m,n in rawMatches:\n\t\tif m.distance < n.distance*loweRatio:\n\t\t\tmatches.append((m.trainIdx,n.queryIdx))\n\treturn matches\n\n#Render a loaded obj model into the current video frame\ndef render(img, obj, projection, model, color=False):\n\n vertices = obj.vertices\n scale_matrix = np.eye(3) * SIZE\n h, w = model.shape\n\n for face in obj.faces:\n face_vertices = face[0]\n points = np.array([vertices[vertex - 1] for vertex in face_vertices])\n points = np.dot(points, scale_matrix)\n # render model in the middle of the reference surface. 
To do so,\n # model points must be displaced\n points = np.array([[p[0] + w / 2, p[1] + h / 2, p[2]] for p in points])\n dst = cv2.perspectiveTransform(points.reshape(-1, 1, 3), projection)\n imgpts = np.int32(dst)\n if color is False:\n cv2.fillConvexPoly(img, imgpts, (137, 27, 211))\n else:\n color = hex_to_rgb(face[-1])\n color = color[::-1] # reverse\n cv2.fillConvexPoly(img, imgpts, color)\n\n return img\n\ndef projection_matrix(camera_parameters, homography):\n \"\"\"\n From the camera calibration matrix and the estimated homography\n compute the 3D projection matrix\n \"\"\"\n # Compute rotation along the x and y axis as well as the translation\n homography = homography * (-1)\n rot_and_transl = np.dot(np.linalg.inv(camera_parameters), homography)\n col_1 = rot_and_transl[:, 0]\n col_2 = rot_and_transl[:, 1]\n col_3 = rot_and_transl[:, 2]\n # normalise vectors\n l = math.sqrt(np.linalg.norm(col_1, 2) * np.linalg.norm(col_2, 2))\n rot_1 = col_1 / l\n rot_2 = col_2 / l\n translation = col_3 / l\n # compute the orthonormal basis\n c = rot_1 + rot_2\n p = np.cross(rot_1, rot_2)\n d = np.cross(c, p)\n rot_1 = np.dot(c / np.linalg.norm(c, 2) + d / np.linalg.norm(d, 2), 1 / math.sqrt(2))\n rot_2 = np.dot(c / np.linalg.norm(c, 2) - d / np.linalg.norm(d, 2), 1 / math.sqrt(2))\n rot_3 = np.cross(rot_1, rot_2)\n # finally, compute the 3D projection matrix from the model to the current frame\n projection = np.stack((rot_1, rot_2, rot_3, translation)).T\n return (np.dot(camera_parameters, projection),projection)\n#---projection END---#\n\n#Helper function to convert hex strings to RGB\ndef hex_to_rgb(hex_color):\n \n hex_color = hex_color.lstrip('#')\n h_len = len(hex_color)\n return tuple(int(hex_color[i:i + h_len // 3], 16) for i in range(0, h_len, h_len // 3))\n\n\n# Command line argument parsing\nparser = argparse.ArgumentParser(description='Augmented reality application')\n\nparser.add_argument('-mo','--model', help = 'do not draw model on target surface on frame', action = 'store_true')\nparser.add_argument('-r','--rectangle', help = 'draw rectangle delimiting target surface on frame', action = 'store_true')\n#parser.add_argument('-ma','--matches', help = 'draw matches between keypoints', action = 'store_true')\nparser.add_argument('-po','--pose', help = 'print camera pose for each frame', action = 'store_true')\n\nargs = parser.parse_args()\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.eye", "numpy.linalg.inv", "numpy.stack", "numpy.cross", "numpy.float32", "numpy.int32", "numpy.array", "numpy.dot", "numpy.linalg.norm" ] ]
soar-telescope/sami
[ "8a9e2b28e3e7d753d05220abd0bac6912fa36ad1" ]
[ "soar_simager/data_reduction/reduce.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf8 -*-\n\"\"\"\n SAMI XJoin\n\n This script simply joins the four existing extensions inside a FITS file\n created during observations with SAMI (SAM Imager). During the reduce,\n it also fits a 2nd degree polynomium to the OVERSCAN region that is\n subtracted from the corresponding image.\n\n The user also may want to add flags in order to reduce the images\n according to the following options (in order):\n\n - BIAS subtraction;\n - DARK subtraction;\n - Remove hot pixels and cosmic rays;\n - Remove overglow using a long exposure DARK image;\n - Divide by the FLAT;\n - Divide by the exposure time;\n\n The documentation for each reduce is shown in the corresponding function.\n\n Todo\n ----\n - Use multithread or multiprocessing to run this script faster.\n - Use astropy.ccdproc to reduce the data.\n\n Bruno Quint (bquint at ctio.noao.edu)\n May 2016\n\n Thanks to Andrei Tokovinin and Claudia M. de Oliveira for the ideas that\n were implemented here.\n\"\"\"\n\nimport numpy as _np\n\nfrom ccdproc import cosmicray_lacosmic as _cosmicray_lacosmic\nfrom scipy import stats\n\nfrom astropy import wcs\nfrom astropy.coordinates import SkyCoord\nfrom astropy import units as u\n\nfrom soar_simager.io import pyfits as _pyfits\nfrom soar_simager.io.logging import get_logger\nfrom soar_simager.tools import slices\n\nlogger = get_logger(__name__)\n\n\n# Piece of code from cosmics.py\n# We define the laplacian kernel to be used\n_laplkernel = _np.array([[0.0, -1.0, 0.0], [-1.0, 4.0, -1.0], [0.0, -1.0, 0.0]])\n\n# Other kernels :\n_growkernel = _np.ones((3, 3))\n\n# dilation structure for some morphological operations\n_dilstruct = _np.ones((5, 5))\n_dilstruct[0, 0] = 0\n_dilstruct[0, 4] = 0\n_dilstruct[4, 0] = 0\n_dilstruct[4, 4] = 0\n\n\nclass Reducer:\n \"\"\"\n This class holds all the methods used to join the extensions within a\n FITS file obtained with SAMI.\n\n Parameters\n ----------\n zero_file : str\n The filename of the master zero that will be used in subtraction.\n\n clean : bool\n Clean bad collumns by taking the _median value of the pixels around\n them.\n\n cosmic_rays : bool\n Clean cosmic rays using LACosmic package. 
See noted bellow for\n reference.\n\n dark_file : str\n Master Dark's filename to be used for dark subtraction.\n\n debug : bool\n Turn on debug mode with lots of printing.\n\n flat_file : str\n Master Flat filename to be used for normalization.\n\n glow_file : str\n Master file that contains the lateral glowings sometimes present in\n SAMI's data.\n\n time : bool\n Divide each pixel's values by the exposure time and update header.\n\n verbose : bool\n Turn on verbose mode (not so talktive as debug mode).\n\n Attributes\n ----------\n gain : list\n A list containing the gain that converts ADU values to eletrons for \n each simager amplifier.\n \n read_noise : list\n A list containing the read noise on each simager amplifier.\n\n See also\n --------\n LACosmic - http://www.astro.yale.edu/dokkum/lacosmic/\n \"\"\"\n\n gain = [2.6, 2.6, 2.6, 2.6]\n read_noise = [10., 10., 10., 10.]\n\n def __init__(self, clean=False, cosmic_rays=False, dark_file=None,\n debug=False, flat_file=None, glow_file=None, merge=False,\n overscan=False, norm_flat=False, time=False, verbose=False,\n zero_file=None):\n\n logger.setLevel(\"ERROR\")\n\n if verbose:\n logger.setLevel(\"INFO\")\n\n if debug:\n logger.setLevel(\"DEBUG\")\n\n self.clean = clean\n self.cosmic_rays = cosmic_rays\n self.dark_file = dark_file\n self.flat_file = flat_file\n self.glow_file = glow_file\n self._merge = merge\n self.norm_flat = norm_flat\n self.overscan = overscan\n self.time = time\n self.zero_file = zero_file\n\n return\n\n def reduce(self, hdu_list, prefix=\"\"):\n\n # If the number of extensions is just 1, then the file is already\n # processed.\n if len(hdu_list) == 1:\n return hdu_list, ''\n\n # Merge file\n data, header, prefix = self.merge(hdu_list)\n\n # Correct ZERO\n data, header, prefix = self.correct_zero(\n data, header, prefix, self.zero_file\n )\n\n # Correct DARK\n data, header, prefix = self.correct_dark(\n data, header, prefix, self.dark_file\n )\n\n # Remove cosmic rays and hot pixels\n data, header, prefix = self.remove_cosmic_rays(\n data, header, prefix, self.cosmic_rays\n )\n\n # Remove lateral glows\n data, header, prefix = self.correct_lateral_glow(\n data, header, prefix, self.glow_file\n )\n\n # Correct FLAT\n data, header, prefix = self.correct_flat(\n data, header, prefix, self.flat_file\n )\n\n # Normalize by the EXPOSURE TIME\n data, header, prefix = self.divide_by_exposuretime(\n data, header, prefix, self.time\n )\n\n # Clean known bad columns and lines\n data, header, prefix = self.clean_hot_columns_and_lines(\n data, header, prefix, self.clean\n )\n\n # Add WCS\n data, header = self.create_wcs(\n data, header\n )\n\n return data, header, prefix\n\n @staticmethod\n def create_wcs(data, header):\n \"\"\"\n Creates a first guess of the WCS using the telescope coordinates, the\n CCDSUM (binning), position angle and plate scale.\n\n Parameters\n ----------\n data : numpy.ndarray\n 2D array with the data.\n\n header : astropy.io.fits.Header\n Primary Header to be updated.\n\n Returns\n -------\n header : astropy.io.fits.Header\n Primary Header with updated WCS information.\n \"\"\"\n h = header\n\n if 'EQUINOX' not in h:\n h['EQUINOX'] = 2000.\n\n if 'EPOCH' not in h:\n h['EPOCH'] = 2000.\n\n if h['PIXSCAL1'] != h['PIXSCAL2']:\n logger.warning('Pixel scales for X and Y do not mach.')\n\n if h['OBSTYPE'] != 'OBJECT':\n return data, header\n\n binning = _np.array([int(b) for b in h['CCDSUM'].split(' ')])\n plate_scale = h['PIXSCAL1'] * u.arcsec\n p = plate_scale.to('degree').value\n w = wcs.WCS(naxis=2)\n\n 
try:\n coordinates = SkyCoord(ra=h['RA'], dec=h['DEC'],\n unit=(u.hourangle, u.deg))\n\n except ValueError:\n\n logger.error(\n '\"RA\" and \"DEC\" missing. Using \"TELRA\" and \"TELDEC\" instead.')\n\n coordinates = SkyCoord(ra=h['TELRA'], dec=h['TELDEC'],\n unit=(u.hourangle, u.deg))\n\n ra = coordinates.ra.to('degree').value\n dec = coordinates.dec.to('degree').value\n\n w.wcs.crpix = [data.shape[1] / 2, data.shape[0] / 2]\n w.wcs.cdelt = p * binning\n w.wcs.crval = [ra, dec]\n w.wcs.ctype = [\"RA---TAN\", \"DEC--TAN\"]\n\n wcs_header = w.to_header()\n\n theta = _np.deg2rad(h['DECPANGL'])\n wcs_header['cd1_1'] = p * binning[0] * _np.cos(theta)\n wcs_header['cd2_2'] = p * binning[0] * _np.cos(theta)\n wcs_header['cd1_2'] = p * binning[0] * _np.sin(theta)\n wcs_header['cd2_1'] = - p * binning[0] * _np.sin(theta)\n\n for key in wcs_header.keys():\n header[key] = wcs_header[key]\n\n return data, header\n\n @staticmethod\n def check_header(hdu_list, prefix):\n\n for i in range(5):\n\n h = hdu_list[i].header\n\n try:\n h['RADESYSa'] = h['RADECSYS']\n del h['RADECSYS']\n except KeyError:\n pass\n\n if 'EQUINOX' in h and 'unavail' in h['EQUINOX']:\n h['EQUINOX'] = 2000.\n\n if 'EPOCH' not in h:\n h['EPOCH'] = 2000.\n\n return hdu_list, prefix\n\n @staticmethod\n def clean_column(_data, x0, y0, yf, n=5):\n \"\"\"\n Substitutes a single column by the _median of the neighbours columns.\n\n Args:\n\n _data (numpy.ndarray) : A 2D numpy array that contains the data.\n\n x0 (int) : X position of the pixel to be cleaned.\n\n y0 (int) : Start position of the column.\n\n yf (int) : Final position of the column.\n\n n (int, optional) : Number of neighbour columns (Default=5).\n\n Returns:\n\n _data (numpy.ndarray) : Processed 2D numpy array.\n\n See also:\n\n Reducer.clean_columns\n Reducer.clean_line\n Reducer.clean_lines\n \"\"\"\n\n if not isinstance(_data, _np.ndarray):\n raise (TypeError, 'Please, use a np.array as input')\n\n if _data.ndim is not 2:\n raise (TypeError, 'Data contains %d dimensions while it was '\n 'expected 2 dimensions.')\n\n t1 = _data[y0:yf, x0 - n:x0]\n t2 = _data[y0:yf, x0 + 1:x0 + n]\n t = _np.hstack((t1, t2))\n _data[y0:yf, x0] = _np.median(t, axis=1)\n\n return _data\n\n def clean_columns(self, data, header):\n \"\"\"\n Clean the known bad columns that exists in most of SAMI's, SOI's or\n SIFS's data. This method is meant to be overwritten via inheritance.\n\n Args:\n\n data (numpy.ndarray) : A 2D numpy array that contains the data.\n\n header (astropy.io.fits.Header) : A header that will be updated.\n\n See also:\n\n Reducer.clean_column\n Reducer.clean_line\n Reducer.clean_lines\n \"\"\"\n return data, header\n\n @staticmethod\n def clean_line(_data, x0, xf, y, n=5):\n \"\"\"\n Substitutes a single column by the _median of the neighbours columns.\n\n Args:\n\n _data (numpy.ndarray) : A 2D numpy array that contains the data.\n\n x0 (int) : Start position of the line.\n\n xf (int) : Final position of the line.\n\n y (int) : Y position of the pixel to be cleaned.\n\n n (int) : Number of neighbour columns. 
(Default=5)\n\n See also:\n\n Reducer.clean_column\n Reducer.clean_columns\n Reducer.clean_lines\n \"\"\"\n if not isinstance(_data, _np.ndarray):\n raise (TypeError, 'Please, use a np.array as input')\n\n if _data.ndim is not 2:\n raise (TypeError, 'Data contains %d dimensions while it was '\n 'expected 2 dimensions.')\n\n t1 = _data[y - n:y, x0:xf]\n t2 = _data[y + 1:y + n, x0:xf]\n t = _np.vstack((t1, t2))\n _data[y, x0:xf] = _np.median(t, axis=0)\n\n return _data\n\n def clean_lines(self, data, header):\n \"\"\"\n Clean the known bad lines that exists in most of SAMI's, SOI's or\n SIFS's data. This method is meant to be overwritten via inheritance.\n\n Args:\n\n data (numpy.ndarray) : A 2D numpy array that contains the data.\n\n header (astropy.io.fits.Header) : A header that will be updated.\n\n See also:\n\n Reducer.clean_column\n Reducer.clean_line\n Reducer.clean_lines\n \"\"\"\n return data, header\n\n def clean_hot_columns_and_lines(self, data, header, prefix, clean):\n \"\"\"\n Clean known hot columns and lines from SAMI's images.\n\n Args:\n\n data (numpy.ndarray) : A 2D numpy array that contains the data.\n\n header (astropy.io.fits.Header) : A header that will be updated.\n\n prefix (str) : File prefix that is added after each reduce.\n\n clean (bool) : Should I perform the clean?\n\n See also:\n\n Reducer.clean_column\n Reducer.clean_columns\n Reducer.clean_line\n Reducer.clean_lines\n \"\"\"\n if clean is True:\n\n data = self.clean_columns(data, header)\n data = self.clean_lines(data, header)\n header.add_history('Cleaned bad columns and lines.')\n prefix = 'c' + prefix\n\n return data, header, prefix\n\n @staticmethod\n def correct_dark(data, header, prefix, dark_file=None):\n \"\"\"\n Subtract the dark file from data and add HISTORY to header.\n\n Args:\n\n data (numpy.ndarray) : A 2D numpy array that contains the data.\n\n header (astropy.io.fits.Header) : A header that will be updated.\n\n prefix : str\n File prefix that is added after each reduce.\n\n dark_file: str | None\n Master Dark filename. If None is given, nothing is done.\n \"\"\"\n\n if not isinstance(prefix, str):\n raise (TypeError, 'Expected string but found %s instead.' %\n prefix.__class__)\n\n if dark_file is not None:\n\n dark = _pyfits.open(dark_file)[0]\n dark.data = dark.data / float(dark.header['EXPTIME'])\n\n data = data - dark.data * header['EXPTIME']\n header['DARKFILE'] = dark_file\n prefix = 'd' + prefix\n\n return data, header, prefix\n\n @staticmethod\n def correct_flat(data, header, prefix, flat_file):\n \"\"\"\n Divide the image by the master flat file and add HISTORY to header.\n\n Args:\n\n data (numpy.ndarray) : A 2D numpy array that contains the data.\n\n header (astropy.io.fits.Header) : A header that will be updated.\n\n prefix (str) : File prefix that is added after each reduce.\n\n flat_file (str or None) : Master flat filename. If None is given,\n nothing is done.\n \"\"\"\n if not isinstance(prefix, str):\n raise (TypeError, 'Expected string but found %s instead.' 
%\n prefix.__class__)\n\n if flat_file is not None:\n flat = _pyfits.open(flat_file)[0]\n\n data /= flat.data\n header['FLATFILE'] = flat_file\n prefix = 'f' + prefix\n\n return data, header, prefix\n\n def correct_lateral_glow(self, data, header, prefix, glow_file):\n \"\"\"\n Remove lateral glows by scaling the glows in the `glow_file` based\n on `data` and subtracting it.\n\n Args:\n\n data (numpy.ndarray) : A 2D numpy array that contains the data.\n\n header (astropy.io.fits.Header) : A header that will be updated.\n\n prefix : str\n Filename prefix to flag images that were clean.\n\n glow_file : str\n Path to a long dark file that contains the lateral glow.\n \"\"\"\n\n if glow_file is not None:\n\n # Create four different regions.\n regions = [\n [_np.median(data[539:589, 6:56]), # Top Left\n _np.median(data[539:589, 975:1019])], # Top Right\n [_np.median(data[449:506, 6:56]), # Bottom Left\n _np.median(data[449:506, 975:1019])] # Bottom Right\n ]\n\n min_std_region = _np.argmin(regions) % 2\n\n # The upper reg has background lower or equal to the lower reg\n midpt1 = regions[0][min_std_region]\n midpt2 = regions[1][min_std_region]\n diff = midpt2 - midpt1\n\n dark = _pyfits.getdata(glow_file)\n dark = self.clean_columns(dark)\n dark = self.clean_lines(dark)\n\n dark_regions = [\n [_np.median(dark[539:589, 6:56]), # Top Left\n _np.median(dark[539:589, 975:1019])], # Top Right\n [_np.median(dark[449:506, 6:56]), # Bottom Left\n _np.median(dark[449:506, 975:1019])] # Bottom Right\n ]\n\n dark_midpt1 = dark_regions[0][min_std_region]\n dark_midpt2 = dark_regions[1][min_std_region]\n\n dark_diff = dark_midpt2 - dark_midpt1\n dark -= dark_midpt1\n\n k = diff / dark_diff\n temp_dark = dark * k\n data -= midpt1\n data -= temp_dark\n\n header.add_history('Lateral glow removed using %s file' % glow_file)\n prefix = 'g' + prefix\n\n return data, header, prefix\n\n @staticmethod\n def correct_zero(data, header, prefix, zero_file):\n \"\"\"\n Subtract zero from data.\n\n Args:\n\n data (numpy.ndarray) : A 2D numpy array that contains the data.\n\n header (astropy.io.fits.Header) : A header that will be updated.\n\n prefix (str) : File prefix that is added after each reduce.\n\n zero_file (str | None) : Master Bias filename. 
If None is given,\n nothing is done.\n\n \"\"\"\n from os.path import abspath\n\n if zero_file is not None:\n\n zero = _pyfits.open(abspath(zero_file))[0]\n data = data - zero.data\n header['BIASFILE'] = zero_file\n prefix = 'z' + prefix\n\n return data, header, prefix\n\n @staticmethod\n def divide_by_exposuretime(data, header, prefix, time):\n \"\"\"\n Divide the image by the exposure time and add HISTORY to header.\n\n Args:\n\n data (numpy.ndarray) : A 2D numpy array that contains the data.\n\n header (astropy.io.fits.Header) : A header that will be updated.\n\n prefix : str\n File prefix that is added after each reduce.\n\n time: bool\n Divide image by exposure time?\n \"\"\"\n if time is True:\n\n h = header\n\n try:\n\n h['UNITS'] = 'adu / s'\n t = float(h['EXPTIME'])\n d = data / t\n\n header = h\n data = d\n\n except AttributeError:\n header = h\n\n except KeyError:\n pass\n\n prefix = 't' + prefix\n\n return data, header, prefix\n\n def get_header(self, hdu_source):\n \"\"\"\n Return the header of the primary HDU extension of a FITS file.\n\n Args:\n\n hdu_source (str or astropy.io.fits.HDUList) : HDUList or name of the\n file which contains a HDUList.\n \"\"\"\n from os.path import exists\n\n if isinstance(hdu_source, str):\n\n if not exists(hdu_source):\n raise (IOError, '%s file not found.' % hdu_source)\n\n hdu_source = _pyfits.open(hdu_source)\n\n h0 = hdu_source[0].header\n h1 = hdu_source[1].header\n\n h0.append('UNITS')\n h0.set('UNITS', value='ADU', comment='Pixel intensity units.')\n\n # Save the CCD binning in the main header\n h0['CCDSUM'] = h1['CCDSUM']\n h0['DETSEC'] = h1['DETSEC']\n\n # Save the area that corresponds to each amplifier\n bin_size = _np.array(h0['CCDSUM'].split(' '), dtype=int)\n\n dx, dy = slices.iraf2python(h0['DETSEC'])\n dx, dy = dx // bin_size[0], dy // bin_size[1]\n\n h0['AMP_SEC1'] = slices.python2iraf(\n dx[0], dx[1], dy[0], dy[1])\n\n h0['AMP_SEC2'] = slices.python2iraf(\n dx[0] + dx[1], dx[1] + dx[1], dy[0], dy[1])\n\n h0['AMP_SEC3'] = slices.python2iraf(\n dx[0], dx[1], dy[0] + dy[1], dy[1] + dy[1])\n\n h0['AMP_SEC4'] = slices.python2iraf(\n dx[0] + dx[1], dx[1] + dx[1], dy[0] + dy[1], dy[1] + dy[1])\n\n return h0\n\n def get_prefix(self):\n \"\"\"\n Return a prefix to be added to the file deppending on the data\n reduction steps.\n\n Returns\n -------\n prefix : (str)\n The prefix that can be used.\n m = merged amplifiers.\n z = zero subtracted.\n f = flat corrected.\n \"\"\"\n\n prefix = 'm_'\n\n if self.zero_file:\n prefix = 'z' + prefix\n\n if self.dark_file:\n prefix = 'd' + prefix\n\n if self.flat_file:\n prefix = 'f' + prefix\n\n return prefix\n\n def merge(self, hdul):\n \"\"\"\n Open a FITS image and try to join its extensions in a single array.\n\n Args:\n\n hdul (astropy.io.fits.HDUList) : an HDUList that contains one\n PrimaryHDU and four ImageHDU\n\n \"\"\"\n w, h = slices.iraf2python(hdul[1].header['DETSIZE'])\n\n if len(hdul) is 1:\n logger.warning('%s file contains a single extension. 
' % hdul +\n 'Not doing anything')\n return hdul[0].data\n\n # Correct for binning\n bin_size = _np.array(hdul[1].header['CCDSUM'].split(' '),\n dtype=int)\n bw, bh = w[1] // bin_size[0], h[1] // bin_size[1]\n\n # Create empty full frame\n new_data = _np.empty((bh, bw), dtype=float)\n\n # Process each extension\n for i in range(1, 5):\n tx, ty = slices.iraf2python(hdul[i].header['TRIMSEC'])\n bx, by = slices.iraf2python(hdul[i].header['BIASSEC'])\n\n data = hdul[i].data\n trim = data[ty[0]:ty[1], tx[0]:tx[1]]\n bias = data[by[0]:by[1], bx[0]:bx[1]]\n\n # Collapse the bias columns to a single column.\n bias = _np.median(bias, axis=1)\n\n # Fit and remove OVERSCAN\n x = _np.arange(bias.size) + 1\n bias_fit_pars = _np.polyfit(x, bias, 2) # Last par = inf\n bias_fit = _np.polyval(bias_fit_pars, x)\n bias_fit = bias_fit.reshape((bias_fit.size, 1))\n bias_fit = _np.repeat(bias_fit, trim.shape[1], axis=1)\n\n trim = trim - bias_fit\n dx, dy = slices.iraf2python(hdul[i].header['DETSEC'])\n dx, dy = dx // bin_size[0], dy // bin_size[1]\n new_data[dy[0]:dy[1], dx[0]:dx[1]] = trim\n\n header = self.get_header(hdul)\n\n return new_data, header, \"m_\"\n\n @staticmethod\n def remove_cosmic_rays(data, header, prefix, cosmic_rays):\n \"\"\"\n Use LACosmic to remove cosmic rays.\n\n Args:\n\n data (numpy.ndarray) : A 2D numpy array that contains the data.\n\n header (astropy.io.fits.Header) : A header that will be updated.\n\n prefix : str\n Filename prefix to flag images that were clean.\n\n cosmic_rays : bool\n Flag to indicate if cosmic rays removal should be performed.\n \"\"\"\n if cosmic_rays:\n\n d = data\n d, _ = _cosmicray_lacosmic(\n d, gain=2.6, readnoise=10.0, sigclip=2.5, sigfrac=0.3,\n objlim=5.0)\n d /= 2.6\n\n h = header\n h.add_history(\n 'Cosmic rays and hot pixels removed using LACosmic')\n\n data = d\n header = h\n\n return data, header, prefix\n\n @staticmethod\n def remove_wcs(header):\n\n return header\n\n\nclass SamiReducer(Reducer):\n\n gain = [2.1, 2.0537, 2.1, 2.0823]\n read_noise = [10., 10., 10., 10.]\n \n def reduce(self, hdu_list, prefix=\"\"):\n\n # If the number of extensions is just 1, then the file is already\n # processed.\n if len(hdu_list) == 1:\n return hdu_list, ''\n\n # Merge file\n data, header, prefix = self.merge(hdu_list)\n\n # Removing bad column and line\n data, header, prefix = self.remove_central_bad_columns(\n data, header, prefix,\n )\n\n # Correct ZERO\n data, header, prefix = self.correct_zero(\n data, header, prefix, self.zero_file\n )\n\n # Correct DARK\n data, header, prefix = self.correct_dark(\n data, header, prefix, self.dark_file\n )\n\n # Remove cosmic rays and hot pixels\n data, header, prefix = self.remove_cosmic_rays(\n data, header, prefix, self.cosmic_rays\n )\n\n # Remove lateral glows\n data, header, prefix = self.correct_lateral_glow(\n data, header, prefix, self.glow_file\n )\n\n # Correct FLAT\n data, header, prefix = self.correct_flat(\n data, header, prefix, self.flat_file\n )\n\n # Normalize by the EXPOSURE TIME\n data, header, prefix = self.divide_by_exposuretime(\n data, header, prefix, self.time\n )\n\n # Clean known bad columns and lines\n data, header, prefix = self.clean_hot_columns_and_lines(\n data, header, prefix, self.clean\n )\n\n # Add WCS\n data, header = self.create_wcs(\n data, header\n )\n\n return data, header, prefix\n\n def clean_columns(self, data, header):\n \"\"\"\n Clean the known bad columns that exists in most of SAMI's, SOI's or\n SIFS's data. 
This method is meant to be overwritten via inheritance.\n\n Args:\n\n data (numpy.ndarray) : A 2D numpy array that contains the data.\n\n header (astropy.io.fits.Header) : A header that will be updated.\n\n See also:\n\n Reducer.clean_column\n Reducer.clean_line\n Reducer.clean_lines\n \"\"\"\n binning = header['CCDSUM'].split(' ')[0]\n binning = int(binning.strip())\n\n if binning == 4:\n bad_columns = [\n [167, 0, 513],\n [213, 513, 1023],\n [304, 0, 513],\n [309, 1, 512],\n [386, 0, 513],\n [476, 0, 513],\n [602, 0, 513],\n [671, 0, 513],\n [673, 475, 513],\n [678, 0, 513],\n [741, 0, 513],\n [810, 0, 513],\n [919, 0, 513],\n [212, 513, 1023],\n [680, 513, 1023],\n [725, 513, 1023],\n [848, 513, 1023],\n [948, 0, 512],\n [949, 0, 512]\n ]\n else:\n []\n\n for column in bad_columns:\n x0 = column[0]\n y0 = column[1]\n yf = column[2]\n data = self.clean_column(data, x0, y0, yf)\n\n return data\n\n def clean_lines(self, data, header):\n \"\"\"\n Clean the known bad lines that exists in most of SAMI's, SOI's or\n SIFS's data. This method is meant to be overwritten via inheritance.\n\n Args:\n\n data (numpy.ndarray) : A 2D numpy array that contains the data.\n\n header (astropy.io.fits.Header) : A header that will be updated.\n\n See also:\n\n Reducer.clean_column\n Reducer.clean_line\n Reducer.clean_lines\n \"\"\"\n binning = header['CCDSUM'].split(' ')[0]\n binning = int(binning.strip())\n\n if binning == 4:\n bad_lines = [\n [166, 206, 282],\n [212, 258, 689],\n [214, 239, 688],\n [304, 345, 291],\n [386, 422, 454],\n [398, 422, 38],\n [477, 516, 490],\n [387, 429, 455],\n [574, 603, 494],\n [574, 603, 493],\n [640, 672, 388],\n [604, 671, 388],\n [698, 746, 198],\n [706, 634, 634],\n [772, 812, 354],\n [900, 938, 426],\n [904, 920, 396]\n ]\n\n else:\n bad_lines = []\n\n for line in bad_lines:\n x0 = line[0]\n xf = line[1]\n y = line[2]\n data = self.clean_line(data, x0, xf, y)\n\n return data\n\n @staticmethod\n def remove_central_bad_columns(data, header, prefix):\n \"\"\"\n Remove central bad columns at the interface of the four extensions.\n\n Parameter\n ---------\n data : numpy.ndarray\n 2D Array containing the data.\n \"\"\"\n n_rows, n_columns = data.shape\n\n # Copy the central bad columns to a temp array\n temp_column = data[:, n_columns // 2 - 1:n_columns // 2 + 1]\n\n # Shift the whole image by two columns\n data[:, n_columns // 2 - 1:-2] = data[:, n_columns // 2 + 1:]\n\n # Copy the bad array in the end (right) of the image).\n data[:, -2:] = temp_column\n\n return data, header, prefix\n\n\nclass SifsReducer(SamiReducer):\n pass\n\n\nclass SoiReducer(Reducer):\n \"\"\"\n SoiReducer\n\n This class holds all the methods used to join the extensions within a\n FITS file obtained with SOI.\n\n Parameters\n ----------\n zero_file : str\n The filename of the master zero that will be used in subtraction.\n\n clean : bool\n Clean bad collumns by taking the _median value of the pixels around\n them.\n\n cosmic_rays : bool\n Clean cosmic rays using LACosmic package. 
See noted bellow for\n reference.\n\n dark_file : str\n Master Dark's filename to be used for dark subtraction.\n\n debug : bool\n Turn on debug mode with lots of printing.\n\n flat_file : str\n Master Flat filename to be used for normalization.\n\n glow_file : str\n Master file that contains the lateral glowings sometimes present in\n SAMI's data.\n\n time : bool\n Divide each pixel's values by the exposure time and update header.\n\n verbose : bool\n Turn on verbose mode (not so talktive as debug mode).\n\n See also\n --------\n LACosmic - http://www.astro.yale.edu/dokkum/lacosmic/\n \"\"\"\n\n @staticmethod\n def add_gap(data, header, interpolation_factor=10):\n \"\"\"\n SOI has two detectors which are separated by 7.8 arcsec (or 102\n unbinned pixels). This method reads an merged array and adds the gap\n based on the detector's binning.\n\n Parameters\n ----------\n data : numpy.ndarray\n 2D array with the data merged.\n\n header : astropy.io.fits.Header\n a header that contains the binning information on the 'CCDSUM'\n key.\n \"\"\"\n if header['OBSTYPE'] == 'OBJECT':\n\n binning = header['CCDSUM']\n binning = int(binning.split()[0])\n\n gap_size = 7.8 # arcseconds\n pixel_scale = 0.0767 # arcsecond / pixel\n gap_pixel = int(round(gap_size / pixel_scale / binning, 0))\n\n nrow, ncol = data.shape\n\n data = _np.append(data, _np.zeros((nrow, gap_pixel)), axis=1)\n data[:, ncol // 2 + gap_pixel:] = data[:, ncol // 2:- gap_pixel]\n data[:, ncol // 2:ncol // 2 + gap_pixel] = 0\n\n return data, header\n\n def clean_columns(self, _data, _header):\n \"\"\"\n Clean the known bad columns that exists in most of SAMI's data.\n\n Parameters\n ----------\n _data : numpy.ndarray\n A 2D numpy array that contains the data.\n\n _header : astropy.io.fits.Header\n a header that contains the binning information on the 'CCDSUM'\n key.\n\n See also\n --------\n SoiMerger.clean_column\n SoiMerger.clean_line\n SoiMerger.clean_lines\n \"\"\"\n if not isinstance(_data, _np.ndarray):\n raise (TypeError, 'Please, use a np.array as input')\n if _data.ndim is not 2:\n raise (TypeError, 'Data contains %d dimensions while it was '\n 'expected 2 dimensions.')\n\n b = int(_header['CCDSUM'].strip().split(' ')[0])\n\n if b == 1:\n bad_columns = []\n elif b == 2:\n bad_columns = [\n [855, 0, 2047],\n ]\n elif b == 4:\n bad_columns = [\n [427, 0, 1023]\n ]\n else:\n logger.warning(\n 'Skipping clean_columns for binning {} x {}'.format(b, b))\n bad_columns = []\n\n for column in bad_columns:\n x0 = column[0]\n y0 = column[1]\n yf = column[2]\n _data = self.clean_column(_data, x0, y0, yf)\n\n return _data\n\n def clean_lines(self, hdu_list):\n \"\"\"\n Clean the known bad lines that exists in most of SAMI's, SOI's or\n SIFS's data. This method is meant to be overwritten via inheritance.\n\n Args:\n\n hdu_list (astropy.io.fits.HDUList)\n\n See also:\n\n Reducer.clean_column\n Reducer.clean_line\n Reducer.clean_lines\n \"\"\"\n if not isinstance(hdu_list, _pyfits.HDUList):\n raise TypeError('Please, use a HDUList as input')\n\n if len(hdu_list) != 5:\n raise ValueError(\n \"HDUList is expected to have 1 + 4 elements. 
Found {}\".format(\n len(hdu_list)))\n\n for i in range(1, len(hdu_list)):\n\n _data = hdu_list[i].data\n _hdr = hdu_list[i].header\n\n bad_lines = [\n # [166, 206, 282],\n # [212, 258, 689],\n # [214, 239, 688],\n # [304, 345, 291],\n # [386, 422, 454],\n # [398, 422, 38],\n # [477, 516, 490],\n # [387, 429, 455],\n # [574, 603, 494],\n # [574, 603, 493],\n # [640, 672, 388],\n # [604, 671, 388],\n # [698, 746, 198],\n # [706, 634, 634],\n # [772, 812, 354],\n # [900, 938, 426],\n # [904, 920, 396]\n ]\n\n for line in bad_lines:\n x0 = line[0]\n xf = line[1]\n y = line[2]\n _data = self.clean_line(_data, x0, xf, y)\n\n hdu_list[i].data = _data\n\n return hdu_list\n\n\ndef _normalize_data(data):\n \"\"\"\n This method is intended to normalize flat data before it is applied to the\n images that are being reduced. A total of 1000 random points are used to\n estimate the _median level that will be used for normalization.\n\n Args:\n\n data (numpy.ndarray) : Data that will be normalized\n\n Returns:\n norm_data (numpy.ndarray) : Normalized data.\n \"\"\"\n sample = _np.random.randint(0, high=data.size - 1, size=1000)\n mode = stats.mode(data.ravel()[sample])[0]\n\n return data / mode\n" ]
[ [ "numpy.vstack", "numpy.ones", "numpy.empty", "numpy.zeros", "numpy.polyval", "numpy.argmin", "numpy.median", "numpy.cos", "numpy.repeat", "numpy.arange", "numpy.hstack", "numpy.polyfit", "numpy.array", "numpy.sin", "numpy.random.randint", "numpy.deg2rad" ] ]
haophancs/TREQS
[ "49e354ce2a08cf963ec139d99936020e0f80ced8" ]
[ "LeafNATS/eval_scripts/eval_class_v1.py" ]
[ "import numpy as np\nfrom sklearn.metrics import precision_recall_fscore_support\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import mean_squared_error\n\ndef evaluation(args):\n '''\n We use f-score, accuracy, MSE to evaluation the performance of different models.\n Here, the best model is selected based on the averaged f-score.\n '''\n score_test = 0.0\n score_validate = 0.0\n mdx_test = 1\n mdx_validate = 1\n memo = []\n for epoch in range(1, args.n_epoch+1):\n print('='*50)\n print('Epoch: {}'.format(epoch))\n score_dict = {}\n\n mem_score = {'validate': [], 'test': []}\n\n pred_data = np.loadtxt('../nats_results/validate_pred_{}.txt'.format(epoch))\n true_data = np.loadtxt('../nats_results/validate_true_{}.txt'.format(epoch))\n\n (p1, r1, f1, _) = precision_recall_fscore_support(true_data, pred_data, average='macro')\n accu = accuracy_score(true_data, pred_data)\n mse = mean_squared_error(true_data, pred_data)\n\n print('f_score={}, Accuracy={}, MSE={}'.format(\n np.round(f1, 4), np.round(accu, 4), np.round(mse, 4)))\n mem_score['validate']= [p1, r1, f1, accu, mse]\n\n pred_data = np.loadtxt('../nats_results/test_pred_{}.txt'.format(epoch))\n true_data = np.loadtxt('../nats_results/test_true_{}.txt'.format(epoch))\n\n if accu > score_validate:\n score_validate = accu\n mdx_validate = epoch\n\n (p1, r1, f1, _) = precision_recall_fscore_support(true_data, pred_data, average='macro')\n accu = accuracy_score(true_data, pred_data)\n mse = mean_squared_error(true_data, pred_data)\n print('f_score={}, Accuracy={}, MSE={}'.format(\n np.round(f1, 4), np.round(accu, 4), np.round(mse, 4)))\n mem_score['test'] = [p1, r1, f1, accu, mse]\n\n if accu > score_test:\n score_test = accu\n mdx_test = epoch\n\n memo.append(mem_score)\n\n print('='*50)\n print('Best epoch {}'.format(mdx_validate))\n print('='*50)\n print('Val')\n [p1, r1, f1, accu, mse] = memo[mdx_validate-1]['validate']\n print('f_score={}, Accuracy={}, MSE={}'.format(\n np.round(f1, 4), np.round(accu, 4), np.round(mse, 4)))\n print('Test')\n [p1, r1, f1, accu, mse] = memo[mdx_validate-1]['test']\n print('f_score={}, Accuracy={}, MSE={}'.format(\n np.round(f1, 4), np.round(accu, 4), np.round(mse, 4)))\n print('='*50)\n print('Max epoch {}'.format(mdx_test))\n print('='*50)\n print('Val')\n [p1, r1, f1, accu, mse] = memo[mdx_test-1]['validate']\n print('f_score={}, Accuracy={}, MSE={}'.format(\n np.round(f1, 4), np.round(accu, 4), np.round(mse, 4)))\n print('Test')\n [p1, r1, f1, accu, mse] = memo[mdx_test-1]['test']\n print('f_score={}, Accuracy={}, MSE={}'.format(\n np.round(f1, 4), np.round(accu, 4), np.round(mse, 4)))" ]
[ [ "numpy.round", "sklearn.metrics.accuracy_score", "sklearn.metrics.precision_recall_fscore_support", "sklearn.metrics.mean_squared_error" ] ]
nsfzyzz/dispersion-score
[ "ac0c633fe3af091e83d2d198809d98545a0a311a" ]
[ "eval/output_ds_synthetic.py" ]
[ "\"\"\"This script is used to measure output dispersion score of synthetic datasets\n\"\"\"\nimport os\nimport sys\nimport numpy as np\nimport torch\nimport random\nimport tqdm\nimport time\nfrom pathlib import Path\nfrom os.path import join\nfrom model.model import EncoderDecoder\nsys.path.append(join(os.path.dirname(os.path.abspath(__file__)), \"../\"))\nfrom dataset.toy_dataset.toydataset import ToyDataset\nfrom auxiliary.my_utils import plant_seeds\nfrom auxiliary.metric_parser import parser\nfrom model.pseudo_network import Generator\nfrom eval.metric import ChamferDistanceL2, compute_ptcloud_dismatrix_batch, cluster_eval\nfrom eval.eval_utils import get_logger, CountFrequency, dic_to_array, mean_std\nimport auxiliary.ChamferDistancePytorch.chamfer3D.dist_chamfer_3D as dist_chamfer_3D\n\nopt = parser()\n###Mkdir and logger\nopt.device = torch.device(\"cuda\")\nres_path = join(opt.dir_name, opt.res_folder)\nPath(res_path).mkdir(parents=True, exist_ok=True)\nproc_logger = get_logger(\"process\", res_path, \"process.log\")\nres_logger = get_logger(\"results\", res_path, \"score.log\")\nopt.logger = proc_logger\nprint(opt.trained_exp_dir)\n\n\nnviews_dic = {\"train\":opt.nviews_train, \"test\":opt.nviews_test}\nnum_seed = max(len(opt.seed_list), 1)\nscore_collect = {}\neval_label_list = set()\n\nfor seed_idx in range(num_seed):\n if opt.seed_list:\n opt.seed = opt.seed_list[seed_idx]\n score_collect.update({str(opt.seed):{}})\n plant_seeds(opt.seed)\n\n ##Loading Data and Network\n if opt.split == 'pred':\n eval_loss = ChamferDistanceL2().to(opt.device)\n distChamfer = dist_chamfer_3D.chamfer_3DDist()\n\n if opt.network=='atlasnet':\n network = EncoderDecoder(opt)\n opt.logger.info(f\"Reloading Network Weights from {opt.reload_model_path}...\")\n network.load_state_dict(torch.load(opt.reload_model_path)['model_state_dict'])\n network.to(opt.device)\n \n if opt.split == \"train\":\n dataset = ToyDataset(data_base_dir=opt.data_base_dir, \n json_file=opt.train_json_file,\n num_points=opt.number_points, \n train=True, \n normalization=opt.normalization, \n logger=opt.logger) \n elif opt.split == \"test\" or opt.split == \"pred\":\n dataset = ToyDataset(data_base_dir=opt.data_base_dir, \n json_file=opt.test_json_file,\n num_points=opt.number_points, \n train=False, \n normalization=opt.normalization, \n logger=opt.logger) \n else:\n raise NotImplementedError()\n\n loader = torch.utils.data.DataLoader(dataset, \n batch_size=opt.pred_batch_size, \n shuffle=False, num_workers=8)\n if opt.rsample == 1:\n sample_num = len(dataset)\n opt.nsample = len(dataset)\n else:\n if opt.rsample != -1:\n opt.nsample = int(opt.rsample * len(dataset))\n subset_index = random.sample(range(len(dataset)), opt.nsample)\n dataset = torch.utils.data.Subset(dataset, subset_index)\n sample_num = len(subset_index)\n data = None\n pred_loss = 0.0\n with torch.set_grad_enabled(False): \n for batch in tqdm.tqdm(loader, desc=f\"loading {opt.split} {opt.type} data\"):\n if opt.split == 'pred':\n input_img = batch['image'].to(opt.device)\n pred_points = network(input_img, train=False)\n pred_points = pred_points.transpose(2, 3).contiguous()\n B = pred_points.shape[0]\n pred_points = pred_points.view(B, -1, 3)\n gt_points = batch['points'].to(opt.device)\n assert gt_points.shape[0] == B, f'gt {gt_points.shape[0]}, while pred {B}'\n if data is None:\n data = pred_points\n else:\n data = torch.cat((data, pred_points), dim=0)\n pred_loss += eval_loss(gt_points, pred_points).item()\n dist1, dist2, idx1, idx2 = 
distChamfer(gt_points, pred_points)\n opt.type = 'points'\n\n pred_loss /= len(loader)\n proc_logger.info(f\"Pred Chamfer Loss: {pred_loss:4f}\")\n start_time = time.time()\n\n if opt.type == 'points':\n data = data.to(opt.device)\n metric = ChamferDistanceL2().to(opt.device)\n distance_matrix = compute_ptcloud_dismatrix_batch(data, data, metric, \n opt.pred_batch_size, opt.device, proc_logger)\n else:\n raise NotImplementedError()\n\n elasp_time = (time.time() - start_time) / 60\n\n distance_matrix = distance_matrix.cpu().numpy()\n\n score_collect[str(opt.seed)].update({\"dm\": distance_matrix})\n score_collect[str(opt.seed)].update({\"pred_chamfer\": pred_loss})\n \n n_evals = len(opt.perf_pc_list)\n for index in range(n_evals):\n c_method, e_method, n_cluster, perf_pc = opt.c_method[index], opt.e_method[index], opt.cluster_k[index], opt.perf_pc_list[index]\n\n score, part_label = cluster_eval(c_method=c_method, e_method=e_method, distance_matrix=distance_matrix, \n seed=opt.seed, n_cluster=n_cluster, pc=perf_pc)\n\n label_stat_verbose = \"\"\n freq = CountFrequency(part_label)\n for key, value in freq.items(): \n label_stat_verbose += \"% d :% d | \"%(key, value)\n\n proc_logger.info(f\"{opt.type} mode: {opt.mode}, split: {opt.split} \" + \n f\"nviews: train {opt.nviews_train}, test {opt.nviews_test}, sample num:{sample_num} \" + \n f\"seed{opt.seed}, metric{opt.metric} perf{perf_pc}% \" + \n f\"samp{distance_matrix.shape[0]}, Pred Chamfer: {pred_loss:.4f}, score: {score:.4f} DM\" + \n f\"{distance_matrix.shape[0]}, compute time {elasp_time:2f} min\")\n\n eval_label = f\"{c_method}_{e_method}_k{n_cluster}p{perf_pc}\"\n score_collect[str(opt.seed)].update({eval_label: {}})\n eval_label_list.add(eval_label)\n score_collect[str(opt.seed)][eval_label].update({\"score\": score})\n score_collect[str(opt.seed)][eval_label].update({\"label\": np.array(part_label)}) # cluster label\n score_collect[str(opt.seed)][eval_label].update({\"perf_percent\": perf_pc})\n score_collect[str(opt.seed)][eval_label].update({\"label_stats\": dic_to_array(freq)})\n \neval_label_list = list(eval_label_list)\neval_label_list.sort()\nss_list = {}\nfor eval_label in eval_label_list:\n ss_list.update({eval_label:[]})\n\npred_list = []\n\nfor seed in score_collect:\n pred_list.append(score_collect[seed]['pred_chamfer'])\n for eval_label in eval_label_list:\n ss_list[eval_label].append(score_collect[seed][eval_label][\"score\"])\n\nfor eval_label in eval_label_list:\n avg_score_lst = [score/sample_num for score in ss_list[eval_label]]\n ss_mean, ss_std = mean_std(ss_list[eval_label])\n avg_ss_mean, avg_ss_std = mean_std(avg_score_lst)\n score_collect.update({f'{eval_label}': np.array([ss_mean, ss_std])})\n score_collect.update({f'avg_{eval_label}': np.array([avg_ss_mean, avg_ss_std])})\n\npred_loss_mean, pred_loss_std = mean_std(pred_list)\n\nscore_collect.update({'split': opt.split})\nscore_collect.update({'type': opt.type})\nscore_collect.update({'mode': opt.mode})\nscore_collect.update({'sample_num': sample_num})\nscore_collect.update({'chamfer_stats': np.array([pred_loss_mean, pred_loss_std])})\nscore_collect.update({'trainnv': np.array([opt.nviews_train])})\nscore_collect.update({'testnv': np.array([opt.nviews_test])})\n\nfor eval_label in eval_label_list:\n ss_mean, ss_std = score_collect[f'{eval_label}'][0], score_collect[f'{eval_label}'][1]\n avg_ss_mean, avg_ss_std = score_collect[f'avg_{eval_label}'][0], score_collect[f'avg_{eval_label}'][1]\n res_logger.info(f\"{opt.network} {opt.type} mode: {opt.mode}, 
split: {opt.split}, \" + \n f\"nviews: train {opt.nviews_train}, test {opt.nviews_test}, sample num: {sample_num} \" + \n f\"seed_list {opt.seed_list}, metric {opt.metric} perf: {perf_pc} % {opt.metric} {opt.trained_exp_dir} {eval_label} \" + \n f\"Sum of Score: (mean: {ss_mean:.4f}|std: {ss_std:.4f}) \"+ \n f\"Average Score: (mean: {avg_ss_mean:.4f}|std: {avg_ss_std:.4f}) \"+ \n f\"Pred Chamfer: (mean:{pred_loss_mean:.4f}|std: {pred_loss_std:.4f}) \" +\n f\"DM compute time {elasp_time:.2f} min\")\n \nnp.savez_compressed(os.path.join(res_path, \nf\"{opt.network}_{opt.mode}_{opt.split}_{opt.type}_{sample_num}_{opt.trained_exp_dir.split('/')[-1]}.npz\"), **score_collect)\n \nres_logger.info(f\"###############END OF {opt.type} {opt.network} {opt.trained_exp_dir} PIPELINE#################\")\n\n\n\n" ]
[ [ "torch.utils.data.DataLoader", "torch.load", "torch.utils.data.Subset", "torch.set_grad_enabled", "numpy.array", "torch.device", "torch.cat" ] ]
elifesciences-publications/nelpy
[ "68c1ffff5eee5de60fc365c4f5df3c7200f5c319" ]
[ "nelpy/decoding.py" ]
[ "\"\"\"Bayesian encoding and decoding\"\"\"\n\n__all__ = ['decode1D',\n 'decode2D',\n 'k_fold_cross_validation',\n 'cumulative_dist_decoding_error_using_xval',\n 'cumulative_dist_decoding_error',\n 'get_mode_pth_from_array',\n 'get_mean_pth_from_array']\n\nimport numpy as np\nfrom . import auxiliary\n\ndef get_mode_pth_from_array(posterior, tuningcurve=None):\n \"\"\"If tuningcurve is provided, then we map it back to the external coordinates / units.\n Otherwise, we stay in the bin space.\"\"\"\n n_xbins = posterior.shape[0]\n\n if tuningcurve is None:\n xmin = 0\n xmax = n_xbins\n else:\n # TODO: this only works for TuningCurve1D currently\n if isinstance(tuningcurve, auxiliary.TuningCurve1D):\n xmin = tuningcurve.bins[0]\n xmax = tuningcurve.bins[-1]\n else:\n raise TypeError(\"tuningcurve type not yet supported!\")\n\n _, bins = np.histogram([], bins=n_xbins, range=(xmin,xmax))\n xbins = (bins + xmax/n_xbins)[:-1]\n\n mode_pth = np.argmax(posterior, axis=0)*xmax/n_xbins\n mode_pth = np.where(np.isnan(posterior.sum(axis=0)), np.nan, mode_pth)\n\n return mode_pth\n\ndef get_mean_pth_from_array(posterior, tuningcurve=None):\n \"\"\"If tuningcurve is provided, then we map it back to the external coordinates / units.\n Otherwise, we stay in the bin space.\"\"\"\n n_xbins = posterior.shape[0]\n\n if tuningcurve is None:\n xmin = 0\n xmax = 1\n else:\n # TODO: this only works for TuningCurve1D currently\n if isinstance(tuningcurve, auxiliary.TuningCurve1D):\n xmin = tuningcurve.bins[0]\n xmax = tuningcurve.bins[-1]\n else:\n raise TypeError(\"tuningcurve type not yet supported!\")\n\n _, bins = np.histogram([], bins=n_xbins, range=(xmin,xmax))\n xbins = (bins + xmax/n_xbins)[:-1]\n\n mean_pth = (xbins * posterior.T).sum(axis=1)\n\n return mean_pth\n\ndef decode1D(bst, ratemap, xmin=0, xmax=100, w=1, nospk_prior=None, _skip_empty_bins=True):\n \"\"\"Decodes binned spike trains using a ratemap with shape (n_units, n_ext)\n\n TODO: complete docstring\n TODO: what if we have higher dimensional external correlates? This\n function assumes a 1D correlate. Even if we linearize a 2D\n environment, for example, then mean_pth decoding no longer works as\n expected, so this function should probably be refactored.\n\n Parameters\n ----------\n bst :\n ratemap: array_like\n Firing rate map with shape (n_units, n_ext), where n_ext is the\n number of external correlates, e.g., position bins. 
The rate map\n is in spks/second.\n xmin : float\n xmax : float\n w : int\n nospk_prior : array_like\n Prior distribution over external correlates with shape (n_ext,)\n that will be used if no spikes are observed in a decoding window\n Default is np.nan.\n If nospk_prior is any scalar, then a uniform prior is assumed.\n\n _skip_empty_bins is only used to return the posterior regardless of\n whether any spikes were observed, so that we can understand the spatial\n distribution in the absence of spikes, or at low firing rates.\n\n Returns\n -------\n posteriors : array\n Posterior distribution with shape (n_ext, n_posterior_bins),\n where n_posterior bins <= bst.n_bins, but depends on w and the\n event lengths.\n cum_posterior_lengths : array\n\n mode_pth :\n\n mean_pth :\n\n Examples\n --------\n\n \"\"\"\n\n if w is None:\n w=1\n assert float(w).is_integer(), \"w must be a positive integer!\"\n assert w > 0, \"w must be a positive integer!\"\n\n n_units, t_bins = bst.data.shape\n _, n_xbins = ratemap.shape\n\n # if we pass a TuningCurve1D object, extract the ratemap and re-order\n # units if necessary\n if isinstance(ratemap, auxiliary.TuningCurve1D):\n xmin = ratemap.bins[0]\n xmax = ratemap.bins[-1]\n bin_centers = ratemap.bin_centers\n # re-order units if necessary\n ratemap = ratemap.reorder_units_by_ids(bst.unit_ids)\n ratemap = ratemap.ratemap\n else:\n xmin = 0\n xmax = n_xbins\n bin_centers = np.arange(n_xbins)\n\n if nospk_prior is None:\n nospk_prior = np.full(n_xbins, np.nan)\n elif isinstance(nospk_priors, numbers.Number):\n nospk_prior = np.full(n_xbins, 1.0)\n\n assert nospk_prior.shape[0] == n_xbins, \"prior must have length {}\".format(n_xbins)\n assert nospk_prior.size == n_xbins, \"prior must be a 1D array with length {}\".format(n_xbins)\n\n lfx = np.log(ratemap)\n\n eterm = -ratemap.sum(axis=0)*bst.ds*w\n\n # if we decode using multiple bins at a time (w>1) then we have to decode each epoch separately:\n\n # first, we determine the number of bins we will decode. This requires us to scan over the epochs\n n_bins = 0\n cumlengths = np.cumsum(bst.lengths)\n posterior_lengths = np.zeros(bst.n_epochs, dtype=np.int)\n prev_idx = 0\n for ii, to_idx in enumerate(cumlengths):\n datalen = to_idx - prev_idx\n prev_idx = to_idx\n posterior_lengths[ii] = np.max((1,datalen - w + 1))\n\n n_bins = posterior_lengths.sum()\n posterior = np.zeros((n_xbins, n_bins))\n\n # next, we decode each epoch separately, one bin at a time\n cum_posterior_lengths = np.insert(np.cumsum(posterior_lengths),0,0)\n prev_idx = 0\n for ii, to_idx in enumerate(cumlengths):\n data = bst.data[:,prev_idx:to_idx]\n prev_idx = to_idx\n datacum = np.cumsum(data, axis=1) # ii'th data segment, with column of zeros prepended\n datacum = np.hstack((np.zeros((n_units,1)), datacum))\n re = w # right edge ptr\n # TODO: check if datalen < w and act appropriately\n if posterior_lengths[ii] > 1: # more than one full window fits into data length\n for tt in range(posterior_lengths[ii]):\n obs = datacum[:, re] - datacum[:, re-w] # spikes in window of size w\n re+=1\n post_idx = cum_posterior_lengths[ii] + tt\n if obs.sum() == 0 and _skip_empty_bins:\n # no spikes to decode in window!\n posterior[:,post_idx] = nospk_prior\n else:\n posterior[:,post_idx] = (np.tile(np.array(obs, ndmin=2).T, n_xbins) * lfx).sum(axis=0) + eterm\n else: # only one window can fit in, and perhaps only partially. 
We just take all the data we can get,\n # and ignore the scaling problem where the window size is now possibly less than bst.ds*w\n post_idx = cum_posterior_lengths[ii]\n obs = datacum[:, -1] # spikes in window of size at most w\n if obs.sum() == 0 and _skip_empty_bins:\n # no spikes to decode in window!\n posterior[:,post_idx] = nospk_prior\n else:\n posterior[:,post_idx] = (np.tile(np.array(obs, ndmin=2).T, n_xbins) * lfx).sum(axis=0) + eterm\n\n # normalize posterior:\n posterior = np.exp(posterior) / np.tile(np.exp(posterior).sum(axis=0),(n_xbins,1))\n\n # TODO: what was my rationale behid the following? Why not use bin centers?\n # _, bins = np.histogram([], bins=n_xbins, range=(xmin,xmax))\n # xbins = (bins + xmax/n_xbins)[:-1]\n\n mode_pth = np.argmax(posterior, axis=0)*xmax/n_xbins\n mode_pth = np.where(np.isnan(posterior.sum(axis=0)), np.nan, mode_pth)\n mean_pth = (bin_centers * posterior.T).sum(axis=1)\n return posterior, cum_posterior_lengths, mode_pth, mean_pth\n\ndef decode2D(bst, ratemap, xmin=0, xmax=100, ymin=0, ymax=100, w=1, nospk_prior=None, _skip_empty_bins=True):\n \"\"\"Decodes binned spike trains using a ratemap with shape (n_units, ext_nx, ext_ny)\n\n TODO: complete docstring\n TODO: what if we have higher dimensional external correlates? This\n function assumes a 2D correlate. Even if we linearize a 2D\n environment, for example, then mean_pth decoding no longer works as\n expected, so this function should probably be refactored.\n\n Parameters\n ----------\n bst :\n ratemap: array_like\n Firing rate map with shape (n_units, ext_nx, ext_ny), where n_ext is the\n number of external correlates, e.g., position bins. The rate map\n is in spks/second.\n xmin : float\n xmax : float\n w : int\n nospk_prior : array_like\n Prior distribution over external correlates with shape (n_ext,)\n that will be used if no spikes are observed in a decoding window\n Default is np.nan.\n If nospk_prior is any scalar, then a uniform prior is assumed.\n\n _skip_empty_bins is only used to return the posterior regardless of\n whether any spikes were observed, so that we can understand the spatial\n distribution in the absence of spikes, or at low firing rates.\n\n Returns\n -------\n posteriors : array\n Posterior distribution with shape (ext_nx, ext_ny, n_posterior_bins),\n where n_posterior bins <= bst.n_tbins, but depends on w and the\n event lengths.\n cum_posterior_lengths : array\n\n mode_pth :\n\n mean_pth :\n\n Examples\n --------\n\n \"\"\"\n\n def tile_obs(obs, nx, ny):\n n_units = len(obs)\n out = np.zeros((n_units, nx, ny))\n for unit in range(n_units):\n out[unit,:,:] = obs[unit]\n return out\n\n if w is None:\n w=1\n assert float(w).is_integer(), \"w must be a positive integer!\"\n assert w > 0, \"w must be a positive integer!\"\n\n n_units, t_bins = bst.data.shape\n\n xbins = None\n ybins = None\n\n # if we pass a TuningCurve2D object, extract the ratemap and re-order\n # units if necessary\n if isinstance(ratemap, auxiliary.TuningCurve2D):\n xbins = ratemap.xbins\n ybins = ratemap.ybins\n xbin_centers = ratemap.xbin_centers\n ybin_centers = ratemap.ybin_centers\n # re-order units if necessary\n ratemap = ratemap.reorder_units_by_ids(bst.unit_ids)\n ratemap = ratemap.ratemap\n\n _, n_xbins, n_ybins = ratemap.shape\n\n if nospk_prior is None:\n nospk_prior = np.full((n_xbins, n_ybins), np.nan)\n elif isinstance(nospk_priors, numbers.Number):\n nospk_prior = np.full((n_xbins, n_ybins), 1.0)\n\n assert nospk_prior.shape == (n_xbins, n_ybins), \"prior must have shape ({}, 
{})\".format(n_xbins, n_ybins)\n\n lfx = np.log(ratemap)\n\n eterm = -ratemap.sum(axis=0)*bst.ds*w\n\n # if we decode using multiple bins at a time (w>1) then we have to decode each epoch separately:\n\n # first, we determine the number of bins we will decode. This requires us to scan over the epochs\n n_tbins = 0\n cumlengths = np.cumsum(bst.lengths)\n posterior_lengths = np.zeros(bst.n_epochs, dtype=np.int)\n prev_idx = 0\n for ii, to_idx in enumerate(cumlengths):\n datalen = to_idx - prev_idx\n prev_idx = to_idx\n posterior_lengths[ii] = np.max((1,datalen - w + 1))\n\n n_tbins = posterior_lengths.sum()\n\n ########################################################################\n posterior = np.zeros((n_xbins, n_ybins, n_tbins))\n\n # next, we decode each epoch separately, one bin at a time\n cum_posterior_lengths = np.insert(np.cumsum(posterior_lengths),0,0)\n prev_idx = 0\n for ii, to_idx in enumerate(cumlengths):\n data = bst.data[:,prev_idx:to_idx]\n prev_idx = to_idx\n datacum = np.cumsum(data, axis=1) # ii'th data segment, with column of zeros prepended\n datacum = np.hstack((np.zeros((n_units,1)), datacum))\n re = w # right edge ptr\n # TODO: check if datalen < w and act appropriately\n if posterior_lengths[ii] > 1: # more than one full window fits into data length\n for tt in range(posterior_lengths[ii]):\n obs = datacum[:, re] - datacum[:, re-w] # spikes in window of size w\n re+=1\n post_idx = cum_posterior_lengths[ii] + tt\n if obs.sum() == 0 and _skip_empty_bins:\n # no spikes to decode in window!\n posterior[:,:,post_idx] = nospk_prior\n else:\n posterior[:,:,post_idx] = (tile_obs(obs, n_xbins, n_ybins) * lfx).sum(axis=0) + eterm\n else: # only one window can fit in, and perhaps only partially. We just take all the data we can get,\n # and ignore the scaling problem where the window size is now possibly less than bst.ds*w\n post_idx = cum_posterior_lengths[ii]\n obs = datacum[:, -1] # spikes in window of size at most w\n if obs.sum() == 0 and _skip_empty_bins:\n # no spikes to decode in window!\n posterior[:,:,post_idx] = nospk_prior\n else:\n posterior[:,:,post_idx] = (tile_obs(obs, n_xbins, n_ybins) * lfx).sum(axis=0) + eterm\n\n # normalize posterior:\n # see http://timvieira.github.io/blog/post/2014/02/11/exp-normalize-trick/\n for tt in range(n_tbins):\n posterior[:,:,tt] = posterior[:,:,tt] - posterior[:,:,tt].max()\n posterior[:,:,tt] = np.exp(posterior[:,:,tt])\n posterior[:,:,tt] = posterior[:,:,tt] / posterior[:,:,tt].sum()\n\n # if xbins is None:\n # _, bins = np.histogram([], bins=n_xbins, range=(xmin,xmax))\n # xbins = (bins + xmax/n_xbins)[:-1]\n # if ybins is None:\n # _, bins = np.histogram([], bins=n_ybins, range=(ymin,ymax))\n # ybins = (bins + ymax/n_ybins)[:-1]\n\n mode_pth = np.zeros((2, n_tbins))\n for tt in range(n_tbins):\n if np.any(np.isnan(posterior[:,:,tt])):\n mode_pth[0,tt] = np.nan\n mode_pth[0,tt] = np.nan\n else:\n x_, y_ = np.unravel_index(np.argmax(posterior[:,:,tt]), (n_xbins, n_ybins))\n mode_pth[0,tt] = xbins[x_]\n mode_pth[1,tt] = ybins[y_]\n\n expected_x = (xbin_centers * posterior.sum(axis=1).T).sum(axis=1)\n expected_y = (ybin_centers * posterior.sum(axis=0).T).sum(axis=1)\n mean_pth = np.vstack((expected_x, expected_y))\n\n posterior = np.transpose(posterior, axes=[1,0,2])\n\n return posterior, cum_posterior_lengths, mode_pth, mean_pth\n\ndef k_fold_cross_validation(X, k=None, randomize=False):\n \"\"\"\n Generates K (training, validation) pairs from the items in X.\n\n Each pair is a partition of X, where validation is an 
iterable\n of length len(X)/K. So each training iterable is of length\n (K-1)*len(X)/K.\n\n Parameters\n ----------\n X : list or int\n list of items, or list of indices, or integer number of indices\n k : int, or str, optional\n k > 1 number of folds for k-fold cross validation; k='loo' or\n 'LOO' for leave-one-out cross-validation (equivalent to\n k==n_samples). Default is 5.\n randomize : bool\n If true, a copy of X is shuffled before partitioning, otherwise\n its order is preserved in training and validation.\n\n Returns\n -------\n (training, validation)\n\n Example\n -------\n >>> X = [i for i in range(97)]\n >>> for training, validation in k_fold_cross_validation(X, k=5):\n >>> print(training, validation)\n >>> for x in X: assert (x in training) ^ (x in validation), x\n\n \"\"\"\n # deal with default values:\n if isinstance(X, int):\n X = range(X)\n n_samples = len(X)\n if k is None:\n k=5\n elif k=='loo' or k=='LOO':\n k=n_samples\n\n if randomize:\n from random import shuffle\n X=list(X)\n shuffle(X)\n for _k_ in range(k):\n training = [x for i, x in enumerate(X) if i % k != _k_]\n validation = [x for i, x in enumerate(X) if i % k == _k_]\n yield training, validation\n\ndef cumulative_dist_decoding_error_using_xval(bst, extern,*, decodefunc=decode1D, tuningcurve=None, k=5, transfunc=None, n_extern=100, extmin=0, extmax=100, sigma=3, n_bins=None):\n \"\"\"Cumulative distribution of decoding errors during epochs in\n BinnedSpikeTrainArray, evaluated using a k-fold cross-validation\n procedure.\n\n Parameters\n ----------\n bst: BinnedSpikeTrainArray\n BinnedSpikeTrainArray containing all the epochs to be decoded.\n Should typically have the same type of epochs as the ratemap\n (e.g., online epochs), but this is not a requirement.\n tuningcurve : TuningCurve1D\n extern : query-able object of external correlates (e.g. pos AnalogSignalArray)\n ratemap : array_like\n The ratemap (in Hz) with shape (n_units, n_ext) where n_ext are\n the external correlates, e.g., position bins.\n k : int, optional\n Number of fold for k-fold cross-validation. 
Default is k=5.\n    n_bins : int\n        Number of decoding error bins, ranging from tuningcurve.extmin\n        to tuningcurve.extmax.\n\n    Returns\n    -------\n\n    (error, cum_prob)\n    (see Fig 3.(b) of \"Analysis of Hippocampal Memory Replay Using\n    Neural Population Decoding\", Fabian Kloosterman, 2012)\n\n    NOTE: should we allow for an optional tuning curve to be specified,\n    or should we always recompute it ourselves?\n    \"\"\"\n\n    def _trans_func(extern, at):\n        \"\"\"Default transform function to map extern into numerical bins\"\"\"\n\n        _, ext = extern.asarray(at=at)\n\n        return ext\n\n    if transfunc is None:\n        transfunc = _trans_func\n\n    if n_bins is None:\n        n_bins = 200\n\n    max_error = extmax - extmin\n\n    # indices of training and validation epochs / events\n\n    hist = np.zeros(n_bins)\n    for training, validation in k_fold_cross_validation(bst.n_epochs, k=k):\n        # estimate place fields using bst[training]\n        tc = auxiliary.TuningCurve1D(bst=bst[training], extern=extern, n_extern=n_extern, extmin=extmin, extmax=extmax, sigma=sigma)\n        # decode position using bst[validation]\n        posterior, _, mode_pth, mean_pth = decodefunc(bst[validation], tc)\n        # calculate validation error (for current fold) by comparing\n        # decoded pos vs target pos\n        target = transfunc(extern, at=bst[validation].bin_centers)\n\n        histnew, bins = np.histogram(np.abs(target - mean_pth), bins=n_bins, range=(0, max_error))\n        hist = hist + histnew\n\n    # build cumulative error distribution\n    cumhist = np.cumsum(hist)\n    cumhist = cumhist / cumhist[-1]\n    bincenters = (bins + (bins[1] - bins[0])/2)[:-1]\n\n    # modify to start at (0,0):\n    cumhist = np.insert(cumhist, 0, 0)\n    bincenters = np.insert(bincenters, 0, 0)\n\n    # modify to end at (max_error,1):\n    cumhist = np.append(cumhist, 1)\n    bincenters = np.append(bincenters, max_error)\n\n    return cumhist, bincenters\n\ndef cumulative_dist_decoding_error(bst, *, tuningcurve, extern,\n                                   decodefunc=decode1D, transfunc=None,\n                                   n_bins=None):\n    \"\"\"Cumulative distribution of decoding errors during epochs in\n    BinnedSpikeTrainArray using a fixed TuningCurve.\n\n    Parameters\n    ----------\n    bst: BinnedSpikeTrainArray\n        BinnedSpikeTrainArray containing all the epochs to be decoded.\n        Should typically have the same type of epochs as the ratemap\n        (e.g., online epochs), but this is not a requirement.\n    tuningcurve : TuningCurve1D\n    extern : query-able object of external correlates (e.g. 
pos AnalogSignalArray)\n n_bins : int\n Number of decoding error bins, ranging from tuningcurve.extmin\n to tuningcurve.extmax.\n\n Returns\n -------\n\n (cumhist, bincenters)\n (see Fig 3.(b) of \"Analysis of Hippocampal Memory Replay Using\n Neural Population Decoding\", Fabian Kloosterman, 2012)\n\n \"\"\"\n\n def _trans_func(extern, at):\n \"\"\"Default transform function to map extern into numerical bins\"\"\"\n\n _, ext = extern.asarray(at=at)\n\n return ext\n\n if transfunc is None:\n transfunc = _trans_func\n if n_bins is None:\n n_bins = 200\n\n # indices of training and validation epochs / events\n\n max_error = tuningcurve.bins[-1] - tuningcurve.bins[0]\n\n posterior, _, mode_pth, mean_pth = decodefunc(bst=bst, ratemap=tuningcurve)\n target = transfunc(extern, at=bst.bin_centers)\n hist, bins = np.histogram(\n np.abs(target - mean_pth),\n bins=n_bins,\n range=(0, max_error))\n\n # build cumulative error distribution\n cumhist = np.cumsum(hist)\n cumhist = cumhist / cumhist[-1]\n bincenters = (bins + (bins[1] - bins[0])/2)[:-1]\n\n # modify to start at (0,0):\n cumhist = np.insert(cumhist, 0, 0)\n bincenters = np.insert(bincenters, 0, 0)\n\n # modify to end at (max_error,1):\n cumhist = np.append(cumhist, 1)\n bincenters = np.append(bincenters, max_error)\n\n return cumhist, bincenters\n\ndef rmse(predictions, targets):\n \"\"\"Calculate the root mean squared error of an array of predictions.\n\n Parameters\n ----------\n predictions : array_like\n Array of predicted values.\n targets : array_like\n Array of target values.\n\n Returns\n -------\n rmse: float\n Root mean squared error of the predictions wrt the targets.\n \"\"\"\n predictions = np.asanyarray(predictions)\n targets = np.asanyarray(targets)\n rmse = np.sqrt(np.nanmean((predictions - targets) ** 2))\n return rmse" ]
[ [ "numpy.vstack", "numpy.cumsum", "numpy.transpose", "numpy.zeros", "numpy.append", "numpy.histogram", "numpy.nanmean", "numpy.abs", "numpy.insert", "numpy.asanyarray", "numpy.exp", "numpy.arange", "numpy.argmax", "numpy.log", "numpy.max", "numpy.isnan", "numpy.array", "numpy.full" ] ]
griff4692/fairseq
[ "3a1b078e93d6b359282868d8369eb97ed9fdb2e5" ]
[ "fairseq/trainer.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"\nTrain a network across multiple GPUs.\n\"\"\"\n\nimport contextlib\nimport logging\nimport sys\nimport time\nfrom argparse import Namespace\nfrom itertools import chain\nfrom typing import Any, Dict, List\n\nimport torch\nfrom fairseq import checkpoint_utils, models, optim, utils\nfrom fairseq.dataclass.configs import FairseqConfig\nfrom fairseq.dataclass.utils import convert_namespace_to_omegaconf\nfrom fairseq.distributed import utils as distributed_utils\nfrom fairseq.file_io import PathManager\nfrom fairseq.logging import meters, metrics\nfrom fairseq.nan_detector import NanDetector\nfrom fairseq.optim import lr_scheduler\n\nfrom omegaconf import OmegaConf\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Trainer(object):\n \"\"\"Main class for data parallel training.\n\n This class supports synchronous distributed data parallel training,\n where multiple workers each have a full model replica and gradients\n are accumulated across workers before each update. We use\n :class:`~torch.nn.parallel.DistributedDataParallel` to handle\n communication of the gradients across workers.\n \"\"\"\n\n def __init__(self, cfg: FairseqConfig, task, model, criterion, quantizer=None):\n\n if isinstance(cfg, Namespace):\n logger.warning(\n \"argparse.Namespace configuration is deprecated! Automatically converting to OmegaConf\"\n )\n cfg = convert_namespace_to_omegaconf(cfg)\n\n self.cfg = cfg\n self.task = task\n\n # catalog shared parameters\n shared_params = _catalog_shared_params(model)\n self.tpu = cfg.common.tpu\n self.cuda = torch.cuda.is_available() and not cfg.common.cpu and not self.tpu\n if self.cuda:\n self.device = torch.device(\"cuda\")\n elif self.tpu:\n self.device = utils.get_tpu_device()\n else:\n self.device = torch.device(\"cpu\")\n\n if self.cfg.distributed_training.ddp_backend == \"fully_sharded\":\n if self.cfg.common.bf16:\n raise ValueError(\n \"FullyShardedDataParallel is not compatible with --bf16 or \"\n \"--memory-efficient-bf16\"\n )\n if self.cfg.distributed_training.zero_sharding != \"none\":\n raise ValueError(\n \"FullyShardedDataParallel is not compatible with --zero-sharding \"\n \"option (it's already built in)\"\n )\n else:\n if self.cfg.distributed_training.cpu_offload:\n raise ValueError(\"--cpu-offload requires --ddp-backend=fully_sharded\")\n\n # copy model and criterion to current device/dtype\n self._criterion = criterion\n self._model = model\n if cfg.distributed_training.ddp_backend != \"fully_sharded\":\n if cfg.common.fp16:\n self._criterion = self._criterion.half()\n self._model = self._model.half()\n elif cfg.common.bf16:\n self._criterion = self._criterion.to(dtype=torch.bfloat16)\n self._model = self._model.to(dtype=torch.bfloat16)\n if (\n not cfg.distributed_training.pipeline_model_parallel\n # the DistributedFairseqModel wrapper will handle moving to device,\n # so only handle cases which don't use the wrapper\n and not self.use_distributed_wrapper\n ):\n self._criterion = self._criterion.to(device=self.device)\n self._model = self._model.to(device=self.device)\n self.pipeline_model_parallel = cfg.distributed_training.pipeline_model_parallel\n self.last_device = None\n if self.cuda and self.pipeline_model_parallel:\n self.last_device = torch.device(\n cfg.distributed_training.pipeline_devices[-1]\n )\n\n # check that shared parameters are preserved after 
device transfer\n for shared_param in shared_params:\n ref = _get_module_by_path(self._model, shared_param[0])\n for path in shared_param[1:]:\n logger.info(\n \"detected shared parameter: {} <- {}\".format(shared_param[0], path)\n )\n _set_module_by_path(self._model, path, ref)\n\n self._dummy_batch = None # indicates we don't have a dummy batch at first\n self._lr_scheduler = None\n self._num_updates = 0\n self._num_xla_compiles = 0 # for TPUs\n self._optim_history = None\n self._optimizer = None\n self._warn_once = set()\n self._wrapped_criterion = None\n self._wrapped_model = None\n\n # TODO(myleott): support tpu\n if self.cuda and self.data_parallel_world_size > 1:\n self._grad_norm_buf = torch.cuda.DoubleTensor(self.data_parallel_world_size)\n else:\n self._grad_norm_buf = None\n\n self.quantizer = quantizer\n if self.quantizer is not None:\n self.quantizer.set_trainer(self)\n\n # get detailed cuda environment\n if self.cuda:\n self.cuda_env = utils.CudaEnvironment()\n if self.data_parallel_world_size > 1:\n self.cuda_env_arr = distributed_utils.all_gather_list(\n self.cuda_env, group=distributed_utils.get_global_group()\n )\n else:\n self.cuda_env_arr = [self.cuda_env]\n if self.data_parallel_rank == 0:\n utils.CudaEnvironment.pretty_print_cuda_env_list(self.cuda_env_arr)\n else:\n self.cuda_env = None\n self.cuda_env_arr = None\n\n metrics.log_start_time(\"wall\", priority=790, round=0)\n\n self._start_time = time.time()\n self._previous_training_time = 0\n self._cumulative_training_time = None\n\n def reinitialize(self):\n \"\"\"Reinitialize the Trainer, typically after model params change.\"\"\"\n self._lr_scheduler = None\n self._optimizer = None\n self._wrapped_criterion = None\n self._wrapped_model = None\n\n @property\n def data_parallel_world_size(self):\n if self.cfg.distributed_training.distributed_world_size == 1:\n return 1\n return distributed_utils.get_data_parallel_world_size()\n\n @property\n def data_parallel_process_group(self):\n return distributed_utils.get_data_parallel_group()\n\n @property\n def data_parallel_rank(self):\n if self.cfg.distributed_training.distributed_world_size == 1:\n return 0\n return distributed_utils.get_data_parallel_rank()\n\n @property\n def is_data_parallel_master(self):\n # NOTE: this returns true for all model parallel replicas with data\n # parallel rank 0\n return self.data_parallel_rank == 0\n\n @property\n def use_distributed_wrapper(self) -> bool:\n return (\n self.data_parallel_world_size > 1\n and not self.cfg.optimization.use_bmuf\n ) or (\n self.cfg.distributed_training.ddp_backend == \"fully_sharded\"\n and self.cfg.distributed_training.cpu_offload\n )\n\n @property\n def should_save_checkpoint_on_current_rank(self) -> bool:\n \"\"\"Indicates whether to save checkpoints on the current DDP rank.\"\"\"\n if self.cfg.distributed_training.ddp_backend == \"fully_sharded\":\n return True\n else:\n return self.is_data_parallel_master\n\n @property\n def checkpoint_suffix(self) -> str:\n \"\"\"Suffix to add to the checkpoint file name.\"\"\"\n if self.cfg.distributed_training.ddp_backend == \"fully_sharded\":\n return self.cfg.checkpoint.checkpoint_suffix + \"-shard{0}\".format(self.data_parallel_rank)\n else:\n return self.cfg.checkpoint.checkpoint_suffix or \"\"\n\n @property\n def criterion(self):\n if self._wrapped_criterion is None:\n if (\n utils.has_parameters(self._criterion)\n and self.use_distributed_wrapper\n ):\n self._wrapped_criterion = models.DistributedFairseqModel(\n self.cfg.distributed_training,\n 
self._criterion,\n                    process_group=self.data_parallel_process_group,\n                    device=self.device,\n                )\n            else:\n                self._wrapped_criterion = self._criterion\n        return self._wrapped_criterion\n\n    @property\n    def model(self):\n        if self._wrapped_model is None:\n            if self.use_distributed_wrapper:\n                self._wrapped_model = models.DistributedFairseqModel(\n                    self.cfg.distributed_training,\n                    self._model,\n                    process_group=self.data_parallel_process_group,\n                    device=self.device,\n                )\n            else:\n                self._wrapped_model = self._model\n        return self._wrapped_model\n\n    @property\n    def optimizer(self):\n        if self._optimizer is None:\n            self._build_optimizer()\n        return self._optimizer\n\n    @property\n    def lr_scheduler(self):\n        if self._lr_scheduler is None:\n            self._build_optimizer()  # this will initialize self._lr_scheduler\n        return self._lr_scheduler\n\n    def _build_optimizer(self):\n        params = list(\n            filter(\n                lambda p: p.requires_grad,\n                chain(self.model.parameters(), self.criterion.parameters()),\n            )\n        )\n\n        if (\n            self.cfg.distributed_training.ddp_backend == \"fully_sharded\"\n            and self.cfg.common.fp16\n        ):\n            # FullyShardedDataParallel always uses MemoryEfficientFP16 wrapper,\n            # mostly for the grad scaling. But if we don't have the\n            # --memory-efficient-fp16 flag set, then we're effectively doing\n            # regular --fp16 and can allow the use of optimizers that would\n            # otherwise be unsupported by MemoryEfficientFP16Optimizer.\n            allow_unsupported = not self.cfg.common.memory_efficient_fp16\n            self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer(\n                self.cfg, params, allow_unsupported=allow_unsupported\n            )\n        elif self.cfg.common.fp16 or self.cfg.common.bf16:\n            if self.cuda and torch.cuda.get_device_capability(0)[0] < 7:\n                logger.info(\n                    \"NOTE: your device does NOT support faster training with --fp16, \"\n                    \"please switch to FP32 which is likely to be faster\"\n                )\n            if (\n                self.cfg.common.memory_efficient_fp16\n                or self.cfg.common.memory_efficient_bf16\n            ):\n                self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer(\n                    self.cfg, params\n                )\n            else:\n                self._optimizer = optim.FP16Optimizer.build_optimizer(self.cfg, params)\n        else:\n            if self.cuda and torch.cuda.get_device_capability(0)[0] >= 7:\n                logger.info(\"NOTE: your device may support faster training with --fp16\")\n            self._optimizer = optim.build_optimizer(self.cfg.optimizer, params)\n\n        if self.cfg.distributed_training.ddp_backend == \"fully_sharded\":\n            assert not self.cfg.optimization.use_bmuf, \\\n                \"--ddp-backend=fully_sharded is not compatible with BMUF\"\n            assert self._optimizer.supports_flat_params, (\n                \"--ddp-backend=fully_sharded is only compatible with pointwise \"\n                \"optimizers (e.g., Adam, AdamW, Adadelta, Adamax, SGD, etc.). \"\n                \"However, the sharding will result in slightly different results when \"\n                \"using non-pointwise optimizers (e.g., Adagrad, Adafactor, LAMB)\"\n            )\n\n        if self.cfg.optimization.use_bmuf:\n            self._optimizer = optim.FairseqBMUF(\n                self.cfg.bmuf,\n                self._optimizer,\n            )\n\n        if self.cfg.distributed_training.zero_sharding == \"os\":\n            if (\n                self.cfg.common.fp16\n                and not self.cfg.common.memory_efficient_fp16\n                and not self.cfg.common.memory_efficient_bf16\n            ) and not self.cfg.common.fp16_no_flatten_grads:\n                raise ValueError(\n                    \"ZeRO is incompatible with fp16 and flattened grads. 
\"\n \"Please use --fp16-no-flatten-grads\"\n )\n else:\n optim.shard_(self._optimizer, self.data_parallel_process_group)\n\n # We should initialize the learning rate scheduler immediately after\n # building the optimizer, so that the initial learning rate is set.\n self._lr_scheduler = lr_scheduler.build_lr_scheduler(\n self.cfg.lr_scheduler,\n self.optimizer,\n )\n self._lr_scheduler.step_update(0)\n\n def consolidate_optimizer(self):\n \"\"\"For OSS, we need to consolidate the state dict.\"\"\"\n if hasattr(self.optimizer.optimizer, \"consolidate_state_dict\"):\n self.optimizer.optimizer.consolidate_state_dict()\n\n def state_dict(self):\n state_dict = {\n \"args\": None, # legacy\n \"cfg\": (\n OmegaConf.to_container(self.cfg)\n if OmegaConf.is_config(self.cfg) else self.cfg\n ),\n \"model\": self.model.state_dict(),\n \"criterion\": (\n self.criterion.state_dict()\n if utils.has_parameters(self.criterion) else None\n ),\n \"optimizer_history\": (self._optim_history or [])\n + [\n {\n \"criterion_name\": self.get_criterion().__class__.__name__,\n \"optimizer_name\": self.optimizer.__class__.__name__,\n \"lr_scheduler_state\": self.lr_scheduler.state_dict(),\n \"num_updates\": self.get_num_updates(),\n }\n ],\n \"task_state\": self.task.state_dict() if self.task is not None else {},\n \"extra_state\": {\n \"metrics\": metrics.state_dict(),\n \"previous_training_time\": self.cumulative_training_time(),\n }\n }\n if not self.cfg.checkpoint.no_save_optimizer_state:\n state_dict[\"last_optimizer_state\"] = self.optimizer.state_dict()\n return state_dict\n\n def save_checkpoint(self, filename, extra_state):\n \"\"\"Save all training state in a checkpoint file.\"\"\"\n logger.info(f\"Saving checkpoint to {filename}\")\n # call state_dict on all ranks in case it needs internal communication\n state_dict = utils.move_to_cpu(self.state_dict())\n state_dict[\"extra_state\"].update(extra_state)\n if self.should_save_checkpoint_on_current_rank:\n checkpoint_utils.torch_persistent_save(\n state_dict,\n filename,\n async_write=self.cfg.checkpoint.write_checkpoints_asynchronously,\n )\n logger.info(f\"Finished saving checkpoint to {filename}\")\n\n def load_checkpoint(\n self,\n filename,\n reset_optimizer=False,\n reset_lr_scheduler=False,\n optimizer_overrides=None,\n reset_meters=False,\n ):\n \"\"\"\n Load all training state from a checkpoint file.\n rank = 0 will load the checkpoint, and then broadcast it to all\n other ranks.\n \"\"\"\n extra_state, self._optim_history, last_optim_state = None, [], None\n\n logger.info(f\"Preparing to load checkpoint {filename}\")\n is_distributed = self.data_parallel_world_size > 1\n bexists = PathManager.isfile(filename)\n if bexists:\n load_on_all_ranks = (\n self.cfg.checkpoint.load_checkpoint_on_all_dp_ranks\n # TPUs don't support broadcast yet, so load checkpoints\n # on every worker for now\n or self.tpu\n # FSDP requires loading checkpoint shards on all ranks\n or self.cfg.distributed_training.ddp_backend == \"fully_sharded\"\n )\n\n if load_on_all_ranks or self.data_parallel_rank == 0:\n state = checkpoint_utils.load_checkpoint_to_cpu(\n filename, load_on_all_ranks=load_on_all_ranks\n )\n last_optim_state = state.get(\"last_optimizer_state\", None)\n\n # If doing zero_sharding, do not broadcast global optimizer\n # state. 
Later we will broadcast sharded states to each rank\n # to avoid memory from exploding.\n if (\n not load_on_all_ranks\n and self.cfg.distributed_training.zero_sharding == \"os\"\n and \"last_optimizer_state\" in state\n and is_distributed\n ):\n state[\"last_optimizer_state\"] = \"SHARDED\"\n else:\n last_optim_state = None\n state = None\n\n if is_distributed and not load_on_all_ranks:\n state = distributed_utils.broadcast_object(\n state,\n src_rank=0,\n group=self.data_parallel_process_group,\n dist_device=self.device,\n )\n if self.data_parallel_rank > 0:\n last_optim_state = state.get(\"last_optimizer_state\", None)\n\n # load model parameters\n try:\n self.model.load_state_dict(\n state[\"model\"], strict=True, model_cfg=self.cfg.model\n )\n # save memory for later steps\n del state[\"model\"]\n if utils.has_parameters(self.get_criterion()):\n self.get_criterion().load_state_dict(\n state[\"criterion\"], strict=True\n )\n del state[\"criterion\"]\n\n except Exception:\n raise Exception(\n \"Cannot load model parameters from checkpoint {}; \"\n \"please ensure that the architectures match.\".format(filename)\n )\n extra_state = state[\"extra_state\"]\n self._optim_history = state[\"optimizer_history\"]\n\n if last_optim_state is not None and not reset_optimizer:\n # rebuild optimizer after loading model, since params may have changed\n self._build_optimizer()\n\n # only reload optimizer and lr_scheduler if they match\n last_optim = self._optim_history[-1]\n assert (\n last_optim[\"criterion_name\"] == self.get_criterion().__class__.__name__\n ), f\"Criterion does not match; please reset the optimizer (--reset-optimizer). {last_optim['criterion_name']} vs {self.get_criterion().__class__.__name__}\"\n assert (\n last_optim[\"optimizer_name\"] == self.optimizer.__class__.__name__\n ), f\"Optimizer does not match; please reset the optimizer (--reset-optimizer). 
{last_optim['optimizer_name']} vs {self.optimizer.__class__.__name__}\"\n\n if not reset_lr_scheduler:\n self.lr_scheduler.load_state_dict(last_optim[\"lr_scheduler_state\"])\n\n if not load_on_all_ranks and is_distributed:\n last_optim_state = self.optimizer.broadcast_global_state_dict(\n last_optim_state\n )\n self.optimizer.load_state_dict(last_optim_state, optimizer_overrides)\n\n self.set_num_updates(last_optim[\"num_updates\"])\n\n if extra_state is not None:\n itr_state = extra_state[\"train_iterator\"]\n epoch = itr_state[\"epoch\"]\n\n if \"previous_training_time\" in extra_state:\n self._previous_training_time = extra_state[\"previous_training_time\"]\n self._start_time = time.time()\n\n self.lr_step(epoch)\n\n if itr_state.get(\"version\", 1) >= 2 and itr_state[\"iterations_in_epoch\"] == 0:\n # reset meters at start of epoch\n reset_meters = True\n\n if \"metrics\" in extra_state and not reset_meters:\n metrics.load_state_dict(extra_state[\"metrics\"])\n\n # reset TimeMeters, since their start times don't make sense anymore\n for meter in metrics.get_meters(\"default\"):\n if isinstance(meter, meters.TimeMeter):\n meter.reset()\n\n logger.info(\n \"Loaded checkpoint {} (epoch {} @ {} updates)\".format(\n filename, epoch, self.get_num_updates()\n )\n )\n\n else:\n logger.info(\"No existing checkpoint found {}\".format(filename))\n\n return extra_state\n\n def get_train_iterator(\n self,\n epoch,\n combine=True,\n load_dataset=True,\n data_selector=None,\n shard_batch_itr=True,\n disable_iterator_cache=False,\n ):\n \"\"\"Return an EpochBatchIterator over the training set for a given epoch.\"\"\"\n if load_dataset:\n logger.info(\"loading train data for epoch {}\".format(epoch))\n self.task.load_dataset(\n self.cfg.dataset.train_subset,\n epoch=epoch,\n combine=combine,\n data_selector=data_selector,\n tpu=self.tpu,\n )\n batch_iterator = self.task.get_batch_iterator(\n dataset=self.task.dataset(self.cfg.dataset.train_subset),\n max_tokens=self.cfg.dataset.max_tokens,\n max_sentences=self.cfg.dataset.batch_size,\n max_positions=utils.resolve_max_positions(\n self.task.max_positions(),\n self.model.max_positions(),\n self.cfg.dataset.max_tokens,\n ),\n ignore_invalid_inputs=True,\n required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple,\n seed=self.cfg.common.seed,\n num_shards=self.data_parallel_world_size if shard_batch_itr else 1,\n shard_id=self.data_parallel_rank if shard_batch_itr else 0,\n num_workers=self.cfg.dataset.num_workers,\n epoch=epoch,\n data_buffer_size=self.cfg.dataset.data_buffer_size,\n disable_iterator_cache=disable_iterator_cache,\n )\n self.reset_dummy_batch(batch_iterator.first_batch)\n return batch_iterator\n\n def get_valid_iterator(\n self,\n subset,\n disable_iterator_cache=False,\n ):\n \"\"\"Return an EpochBatchIterator over given validation subset for a given epoch.\"\"\"\n batch_iterator = self.task.get_batch_iterator(\n dataset=self.task.dataset(subset),\n max_tokens=self.cfg.dataset.max_tokens_valid,\n max_sentences=self.cfg.dataset.batch_size_valid,\n max_positions=utils.resolve_max_positions(\n self.task.max_positions(),\n self.model.max_positions(),\n ),\n ignore_invalid_inputs=self.cfg.dataset.skip_invalid_size_inputs_valid_test,\n required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple,\n seed=self.cfg.common.seed,\n num_shards=self.data_parallel_world_size,\n shard_id=self.data_parallel_rank,\n num_workers=self.cfg.dataset.num_workers,\n # always pass a fixed \"epoch\" to keep validation data 
consistent\n # across training epochs\n epoch=1,\n data_buffer_size=self.cfg.dataset.data_buffer_size,\n disable_iterator_cache=disable_iterator_cache,\n )\n self.reset_dummy_batch(batch_iterator.first_batch)\n return batch_iterator\n\n def begin_epoch(self, epoch):\n \"\"\"Called at the beginning of each epoch.\"\"\"\n logger.info(\"begin training epoch {}\".format(epoch))\n\n self.lr_step_begin_epoch(epoch)\n\n if self.quantizer is not None:\n self.quantizer.begin_epoch(epoch)\n\n # task specific setup per epoch\n self.task.begin_epoch(epoch, self.get_model())\n\n if self.tpu:\n import torch_xla.core.xla_model as xm\n\n xm.rendezvous(\"begin_epoch\") # wait for all workers\n xm.mark_step()\n\n def begin_valid_epoch(self, epoch):\n \"\"\"Called at the beginning of each validation epoch.\"\"\"\n\n # task specific setup per validation epoch\n self.task.begin_valid_epoch(epoch, self.get_model())\n\n def reset_dummy_batch(self, batch):\n self._dummy_batch = batch\n\n @metrics.aggregate(\"train\")\n def train_step(self, samples, raise_oom=False):\n \"\"\"Do forward, backward and parameter update.\"\"\"\n self._set_seed()\n self.model.train()\n self.criterion.train()\n self.zero_grad()\n\n metrics.log_start_time(\"train_wall\", priority=800, round=0)\n\n # forward and backward pass\n logging_outputs, sample_size, ooms = [], 0, 0\n for i, sample in enumerate(samples): # delayed update loop\n sample, is_dummy_batch = self._prepare_sample(sample)\n\n def maybe_no_sync():\n \"\"\"\n Whenever *samples* contains more than one mini-batch, we\n want to accumulate gradients locally and only call\n all-reduce in the last backwards pass.\n \"\"\"\n if (\n self.data_parallel_world_size > 1\n and hasattr(self.model, \"no_sync\")\n and i < len(samples) - 1\n ):\n return self.model.no_sync()\n else:\n return contextlib.ExitStack() # dummy contextmanager\n\n try:\n with maybe_no_sync():\n # forward and backward\n loss, sample_size_i, logging_output = self.task.train_step(\n sample=sample,\n model=self.model,\n criterion=self.criterion,\n optimizer=self.optimizer,\n update_num=self.get_num_updates(),\n ignore_grad=is_dummy_batch,\n )\n del loss\n\n logging_outputs.append(logging_output)\n sample_size += sample_size_i\n\n # emptying the CUDA cache after the first step can\n # reduce the chance of OOM\n if self.cuda and self.get_num_updates() == 0:\n torch.cuda.empty_cache()\n except RuntimeError as e:\n if \"out of memory\" in str(e):\n self._log_oom(e)\n if raise_oom:\n raise e\n logger.warning(\n \"attempting to recover from OOM in forward/backward pass\"\n )\n ooms += 1\n self.zero_grad()\n if self.cuda:\n torch.cuda.empty_cache()\n if self.cfg.distributed_training.distributed_world_size == 1:\n return None\n else:\n raise e\n\n if self.tpu and i < len(samples) - 1:\n # tpu-comment: every XLA operation before marking step is\n # appended to the IR graph, and processing too many batches\n # before marking step can lead to OOM errors.\n # To handle gradient accumulation use case, we explicitly\n # mark step here for every forward pass without a backward pass\n self._xla_markstep_and_send_to_cpu()\n\n if is_dummy_batch:\n if torch.is_tensor(sample_size):\n sample_size.zero_()\n else:\n sample_size *= 0.0\n\n if torch.is_tensor(sample_size):\n sample_size = sample_size.float()\n else:\n sample_size = float(sample_size)\n\n # gather logging outputs from all replicas\n if self._sync_stats():\n train_time = self._local_cumulative_training_time()\n logging_outputs, (\n sample_size,\n ooms,\n total_train_time,\n ) = 
self._aggregate_logging_outputs(\n logging_outputs, sample_size, ooms, train_time, ignore=is_dummy_batch\n )\n self._cumulative_training_time = (\n total_train_time / self.data_parallel_world_size\n )\n\n overflow = False\n try:\n with torch.autograd.profiler.record_function(\"reduce-grads\"):\n # reduce gradients across workers\n self.optimizer.all_reduce_grads(self.model)\n if utils.has_parameters(self.criterion):\n self.optimizer.all_reduce_grads(self.criterion)\n\n with torch.autograd.profiler.record_function(\"multiply-grads\"):\n # multiply gradients by (data_parallel_size / sample_size) since\n # DDP normalizes by the number of data parallel workers for\n # improved fp16 precision.\n # Thus we get (sum_of_gradients / sample_size) at the end.\n # In case of fp16, this step also undoes loss scaling.\n # (Debugging note: Some optimizers perform this scaling on the\n # fly, so inspecting model.parameters() or optimizer.params may\n # still show the original, unscaled gradients.)\n numer = (\n self.data_parallel_world_size\n if not self.cfg.optimization.use_bmuf or self._sync_stats()\n else 1\n )\n self.optimizer.multiply_grads(numer / (sample_size or 1.0))\n # Note: (sample_size or 1.0) handles the case of a zero gradient, in a\n # way that avoids CPU/device transfers in case sample_size is a GPU or\n # TPU object. The assumption is that the gradient itself is also 0.\n\n with torch.autograd.profiler.record_function(\"clip-grads\"):\n # clip grads\n grad_norm = self.clip_grad_norm(self.cfg.optimization.clip_norm)\n\n # check that grad norms are consistent across workers\n # on tpu check tensor is slow\n if not self.tpu:\n if (\n not self.cfg.optimization.use_bmuf\n and self.cfg.distributed_training.ddp_backend != \"slow_mo\"\n ):\n self._check_grad_norms(grad_norm)\n if not torch.isfinite(grad_norm).all():\n # check local gradnorm single GPU case, trigger NanDetector\n raise FloatingPointError(\"gradients are Nan/Inf\")\n\n with torch.autograd.profiler.record_function(\"optimizer\"):\n # take an optimization step\n self.task.optimizer_step(\n self.optimizer, model=self.model, update_num=self.get_num_updates()\n )\n\n except FloatingPointError:\n # re-run the forward and backward pass with hooks attached to print\n # out where it fails\n self.zero_grad()\n with NanDetector(self.get_model()):\n for _, sample in enumerate(samples):\n sample, _ = self._prepare_sample(sample)\n self.task.train_step(\n sample,\n self.model,\n self.criterion,\n self.optimizer,\n self.get_num_updates(),\n ignore_grad=False,\n )\n raise\n except OverflowError as e:\n overflow = True\n logger.info(f\"NOTE: gradient overflow detected, ignoring gradient, {str(e)}\")\n grad_norm = torch.tensor(0.0).cuda()\n self.zero_grad()\n except RuntimeError as e:\n if \"out of memory\" in str(e):\n self._log_oom(e)\n logger.error(\"OOM during optimization, irrecoverable\")\n raise e\n\n # Some distributed wrappers (e.g., SlowMo) need access to the optimizer\n # after the step\n if hasattr(self.model, \"perform_additional_optimizer_actions\"):\n if hasattr(self.optimizer, \"fp32_params\"):\n self.model.perform_additional_optimizer_actions(\n self.optimizer.optimizer, self.optimizer.fp32_params\n )\n else:\n self.model.perform_additional_optimizer_actions(\n self.optimizer.optimizer\n )\n\n logging_output = None\n if not overflow or self.cfg.distributed_training.ddp_backend == \"slow_mo\":\n self.set_num_updates(self.get_num_updates() + 1)\n\n if self.tpu:\n import torch_xla.core.xla_model as xm\n\n # mark step on TPUs\n 
self._xla_markstep_and_send_to_cpu()\n\n # only log stats every log_interval steps\n # this causes wps to be misreported when log_interval > 1\n logging_output = {}\n if self.get_num_updates() % self.cfg.common.log_interval == 0:\n # log memory usage\n mem_info = xm.get_memory_info(self.device)\n gb_free = mem_info[\"kb_free\"] / 1024 / 1024\n gb_total = mem_info[\"kb_total\"] / 1024 / 1024\n metrics.log_scalar(\n \"gb_free\", gb_free, priority=1500, round=1, weight=0\n )\n metrics.log_scalar(\n \"gb_total\", gb_total, priority=1600, round=1, weight=0\n )\n logging_outputs = self._xla_markstep_and_send_to_cpu(logging_outputs)\n logging_output = self._reduce_and_log_stats(\n logging_outputs, sample_size, grad_norm\n )\n\n # log whenever there's an XLA compilation, since these\n # slow down training and may indicate opportunities for\n # optimization\n self._check_xla_compilation()\n else:\n if self.cuda and self.cuda_env is not None:\n # log minimum free memory over the iteration\n gb_used = torch.cuda.max_memory_allocated() / 1024 / 1024 / 1024\n torch.cuda.reset_peak_memory_stats()\n gb_free = self.cuda_env.total_memory_in_GB - gb_used\n metrics.log_scalar(\n \"gb_free\", gb_free, priority=1500, round=1, weight=0\n )\n\n # log stats\n logging_output = self._reduce_and_log_stats(\n logging_outputs, sample_size, grad_norm\n )\n\n # clear CUDA cache to reduce memory fragmentation\n if (\n self.cuda\n and self.cfg.common.empty_cache_freq > 0\n and (\n (self.get_num_updates() + self.cfg.common.empty_cache_freq - 1)\n % self.cfg.common.empty_cache_freq\n )\n == 0\n ):\n torch.cuda.empty_cache()\n\n if self.cfg.common.fp16:\n metrics.log_scalar(\n \"loss_scale\",\n self.optimizer.scaler.loss_scale,\n priority=700,\n round=4,\n weight=0,\n )\n\n metrics.log_stop_time(\"train_wall\")\n return logging_output\n\n @metrics.aggregate(\"valid\")\n def valid_step(self, sample, raise_oom=False):\n \"\"\"Do forward pass in evaluation mode.\"\"\"\n if self.tpu:\n import torch_xla.core.xla_model as xm\n xm.rendezvous(\"valid_step\") # wait for all workers\n\n with torch.no_grad():\n self.model.eval()\n self.criterion.eval()\n\n sample, is_dummy_batch = self._prepare_sample(sample)\n\n try:\n _loss, sample_size, logging_output = self.task.valid_step(\n sample, self.model, self.criterion\n )\n except RuntimeError as e:\n if \"out of memory\" in str(e):\n self._log_oom(e)\n if not raise_oom:\n logger.warning(\n \"ran out of memory in validation step, retrying batch\"\n )\n for p in self.model.parameters():\n if p.grad is not None:\n p.grad = None # free some memory\n if self.cuda:\n torch.cuda.empty_cache()\n return self.valid_step(sample, raise_oom=True)\n raise e\n\n logging_outputs = [logging_output]\n if is_dummy_batch:\n if torch.is_tensor(sample_size):\n sample_size.zero_()\n else:\n sample_size *= 0.0\n\n # gather logging outputs from all replicas\n if self.data_parallel_world_size > 1:\n logging_outputs, (sample_size,) = self._aggregate_logging_outputs(\n logging_outputs,\n sample_size,\n ignore=is_dummy_batch,\n )\n\n # log validation stats\n if self.tpu:\n logging_outputs = self._xla_markstep_and_send_to_cpu(logging_outputs)\n logging_output = self._reduce_and_log_stats(logging_outputs, sample_size)\n\n return logging_output\n\n def zero_grad(self):\n self.optimizer.zero_grad()\n\n def lr_step_begin_epoch(self, epoch):\n \"\"\"Adjust the learning rate at the beginning of the epoch.\"\"\"\n self.lr_scheduler.step_begin_epoch(epoch)\n # prefer updating the LR based on the number of steps\n return 
self.lr_step_update()\n\n def lr_step(self, epoch, val_loss=None):\n \"\"\"Adjust the learning rate at the end of the epoch.\"\"\"\n self.lr_scheduler.step(epoch, val_loss)\n # prefer updating the LR based on the number of steps\n return self.lr_step_update()\n\n def lr_step_update(self):\n \"\"\"Update the learning rate after each update.\"\"\"\n new_lr = self.lr_scheduler.step_update(self.get_num_updates())\n if isinstance(new_lr, dict):\n for k, v in new_lr.items():\n metrics.log_scalar(f\"lr_{k}\", v, weight=0, priority=300)\n new_lr = new_lr.get(\"default\", next(iter(new_lr.values())))\n else:\n metrics.log_scalar(\"lr\", new_lr, weight=0, priority=300)\n return new_lr\n\n def get_lr(self):\n \"\"\"Get the current learning rate.\"\"\"\n return self.optimizer.get_lr()\n\n def get_model(self):\n \"\"\"Get the (non-wrapped) model instance.\"\"\"\n return self._model\n\n def get_criterion(self):\n \"\"\"Get the (non-wrapped) criterion instance.\"\"\"\n return self._criterion\n\n def get_meter(self, name):\n \"\"\"[deprecated] Get a specific meter by name.\"\"\"\n from fairseq import meters\n\n if \"get_meter\" not in self._warn_once:\n self._warn_once.add(\"get_meter\")\n utils.deprecation_warning(\n \"Trainer.get_meter is deprecated. Please use fairseq.metrics instead.\"\n )\n\n train_meters = metrics.get_meters(\"train\")\n if train_meters is None:\n train_meters = {}\n\n if name == \"train_loss\" and \"loss\" in train_meters:\n return train_meters[\"loss\"]\n elif name == \"train_nll_loss\":\n # support for legacy train.py, which assumed this meter is\n # always initialized\n m = train_meters.get(\"nll_loss\", None)\n return m or meters.AverageMeter()\n elif name == \"wall\":\n # support for legacy train.py, which assumed this meter is\n # always initialized\n m = metrics.get_meter(\"default\", \"wall\")\n return m or meters.TimeMeter()\n elif name == \"wps\":\n m = metrics.get_meter(\"train\", \"wps\")\n return m or meters.TimeMeter()\n elif name in {\"valid_loss\", \"valid_nll_loss\"}:\n # support for legacy train.py, which assumed these meters\n # are always initialized\n k = name[len(\"valid_\") :]\n m = metrics.get_meter(\"valid\", k)\n return m or meters.AverageMeter()\n elif name == \"oom\":\n return meters.AverageMeter()\n elif name in train_meters:\n return train_meters[name]\n return None\n\n def get_num_updates(self):\n \"\"\"Get the number of parameters updates.\"\"\"\n return self._num_updates\n\n def set_num_updates(self, num_updates):\n \"\"\"Set the number of parameters updates.\"\"\"\n self._num_updates = num_updates\n self.lr_step_update()\n if self.quantizer:\n self.quantizer.step_update(self._num_updates)\n metrics.log_scalar(\"num_updates\", self._num_updates, weight=0, priority=200)\n\n def clip_grad_norm(self, clip_norm):\n\n def agg_norm_fn(total_norm):\n total_norm = total_norm.cuda().float() ** 2\n total_norm = distributed_utils.all_reduce(\n total_norm, group=self.data_parallel_process_group\n )\n return total_norm ** 0.5\n\n should_agg_norm = (\n self.cfg.distributed_training.ddp_backend == \"fully_sharded\"\n and (\n self.data_parallel_process_group is not None\n or torch.distributed.is_initialized()\n )\n )\n return self.optimizer.clip_grad_norm(\n clip_norm, aggregate_norm_fn=agg_norm_fn if should_agg_norm else None\n )\n\n def cumulative_training_time(self):\n if self._cumulative_training_time is None:\n # single GPU\n return self._local_cumulative_training_time()\n else:\n return self._cumulative_training_time\n\n def 
_local_cumulative_training_time(self):\n \"\"\"Aggregate training time in seconds.\"\"\"\n return time.time() - self._start_time + self._previous_training_time\n\n def _prepare_sample(self, sample, is_dummy=False):\n if sample == \"DUMMY\":\n raise Exception(\n \"Trying to use an uninitialized 'dummy' batch. This usually indicates \"\n \"that the total number of batches is smaller than the number of \"\n \"participating GPUs. Try reducing the batch size or using fewer GPUs.\"\n )\n\n if sample is None or len(sample) == 0:\n assert (\n self._dummy_batch is not None and len(self._dummy_batch) > 0\n ), \"Invalid dummy batch: {}\".format(self._dummy_batch)\n sample, _ = self._prepare_sample(self._dummy_batch, is_dummy=True)\n return sample, True\n\n if self.cuda:\n if self.pipeline_model_parallel:\n if \"target\" in sample:\n sample[\"target\"] = utils.move_to_cuda(\n sample[\"target\"], device=self.last_device\n )\n else:\n sample = utils.move_to_cuda(sample)\n elif self.tpu and is_dummy:\n # the dummy batch may not be on the appropriate device\n sample = utils.move_to_cuda(sample, device=self.device)\n\n def apply_half(t):\n if t.dtype is torch.float32:\n return t.half()\n return t\n\n def apply_bfloat16(t):\n if t.dtype is torch.float32:\n return t.to(dtype=torch.bfloat16)\n return t\n\n if self.cfg.common.fp16:\n sample = utils.apply_to_sample(apply_half, sample)\n\n if self.cfg.common.bf16:\n sample = utils.apply_to_sample(apply_bfloat16, sample)\n\n if self._dummy_batch == \"DUMMY\":\n self._dummy_batch = sample\n\n return sample, False\n\n def _set_seed(self):\n # Set seed based on args.seed and the update number so that we get\n # reproducible results when resuming from checkpoints\n seed = self.cfg.common.seed + self.get_num_updates()\n utils.set_torch_seed(seed)\n\n def _sync_stats(self):\n # Return True if it's using multiple GPUs and DDP or multiple GPUs with\n # BMUF and it's a bmuf sync with warmup iterations completed before.\n if self.data_parallel_world_size == 1:\n return False\n elif self.cfg.optimization.use_bmuf:\n return (\n self.get_num_updates() + 1\n ) % self.cfg.bmuf.global_sync_iter == 0 and (\n self.get_num_updates() + 1\n ) > self.cfg.bmuf.warmup_iterations\n else:\n return True\n\n def _log_oom(self, exc):\n msg = \"OOM: Ran out of memory with exception: {}\".format(exc)\n logger.warning(msg)\n if torch.cuda.is_available() and hasattr(torch.cuda, \"memory_summary\"):\n for device_idx in range(torch.cuda.device_count()):\n logger.warning(torch.cuda.memory_summary(device=device_idx))\n sys.stderr.flush()\n\n def _aggregate_logging_outputs(\n self,\n logging_outputs: List[Dict[str, Any]],\n *extra_stats_to_sum,\n ignore=False,\n ):\n if self.task.__class__.logging_outputs_can_be_summed(self.get_criterion()):\n return self._fast_stat_sync_sum(\n logging_outputs, *extra_stats_to_sum, ignore=ignore\n )\n else:\n return self._all_gather_list_sync(\n logging_outputs, *extra_stats_to_sum, ignore=ignore\n )\n\n def _all_gather_list_sync(\n self,\n logging_outputs: List[Dict[str, Any]],\n *extra_stats_to_sum,\n ignore=False,\n ):\n \"\"\"\n Sync logging outputs across workers. 
all_gather_list_sync is\n suitable when logging outputs are complex types.\n \"\"\"\n if self.tpu:\n raise NotImplementedError\n if ignore:\n logging_outputs = []\n results = list(\n zip(\n *distributed_utils.all_gather_list(\n [logging_outputs] + list(extra_stats_to_sum),\n max_size=getattr(self.cfg.common, \"all_gather_list_size\", 16384),\n group=self.data_parallel_process_group,\n )\n )\n )\n logging_outputs, extra_stats_to_sum = results[0], results[1:]\n logging_outputs = list(chain.from_iterable(logging_outputs))\n extra_stats_to_sum = [sum(s) for s in extra_stats_to_sum]\n return logging_outputs, extra_stats_to_sum\n\n def _fast_stat_sync_sum(\n self, logging_outputs: List[Dict[str, Any]], *extra_stats_to_sum, ignore=False,\n ):\n \"\"\"\n Sync logging outputs across workers. fast_stat_sync_sum is\n faster than all_gather_list_sync, but is only suitable when\n logging outputs are scalars and can be summed. Note that\n *logging_outputs* cannot contain any nested dicts/lists.\n \"\"\"\n data = {}\n for i, stat in enumerate(extra_stats_to_sum):\n data[\"extra_stats_\" + str(i)] = stat\n if len(logging_outputs) > 0:\n log_keys = list(logging_outputs[0].keys())\n for k in log_keys:\n if not ignore:\n v = sum(log[k] for log in logging_outputs if k in log)\n else:\n v = logging_outputs[0][k]\n v = torch.zeros_like(v) if torch.is_tensor(v) else 0\n data[\"logging_outputs_\" + k] = v\n else:\n log_keys = None\n\n data = distributed_utils.all_reduce_dict(\n data, device=self.device, group=self.data_parallel_process_group\n )\n\n extra_stats_to_sum = [\n data[\"extra_stats_\" + str(i)] for i in range(len(extra_stats_to_sum))\n ]\n if log_keys is not None:\n logging_outputs = [{k: data[\"logging_outputs_\" + k] for k in log_keys}]\n else:\n logging_outputs = []\n return logging_outputs, extra_stats_to_sum\n\n def _check_grad_norms(self, grad_norm):\n \"\"\"Check that grad norms are consistent across workers.\"\"\"\n if self._grad_norm_buf is not None:\n self._grad_norm_buf.zero_()\n self._grad_norm_buf[self.data_parallel_rank] = grad_norm\n distributed_utils.all_reduce(\n self._grad_norm_buf, group=self.data_parallel_process_group\n )\n\n def is_consistent(tensor):\n max_abs_diff = torch.max(torch.abs(tensor - tensor[0]))\n return (\n torch.isfinite(tensor).all()\n and (max_abs_diff / (tensor[0] + 1e-6) < 1e-6).all()\n )\n\n if not is_consistent(self._grad_norm_buf):\n pretty_detail = \"\\n\".join(\n \"rank {:3d} = {:.8f}\".format(r, n)\n for r, n in enumerate(self._grad_norm_buf.tolist())\n )\n error_detail = \"grad_norm across the workers:\\n{}\\n\".format(\n pretty_detail\n )\n # use FloatingPointError to trigger NanDetector\n raise FloatingPointError(\n \"Fatal error: gradients are inconsistent between workers. \"\n \"Try --ddp-backend=legacy_ddp. 
\"\n \"Or are you mixing up different generation of GPUs in training?\"\n + \"\\n\"\n + \"-\" * 80\n + \"\\n{}\\n\".format(error_detail)\n + \"-\" * 80\n )\n\n def _reduce_and_log_stats(self, logging_outputs, sample_size, grad_norm=None):\n if grad_norm is not None and (\n not torch.is_tensor(grad_norm) or torch.isfinite(grad_norm)\n ):\n metrics.log_speed(\"ups\", 1.0, priority=100, round=2)\n metrics.log_scalar(\"gnorm\", grad_norm, priority=400, round=3)\n if self.cfg.optimization.clip_norm > 0:\n metrics.log_scalar(\n \"clip\",\n torch.where(\n grad_norm > self.cfg.optimization.clip_norm,\n grad_norm.new_tensor(100),\n grad_norm.new_tensor(0),\n ),\n priority=500,\n round=1,\n )\n\n with metrics.aggregate() as agg:\n if logging_outputs is not None:\n self.task.reduce_metrics(logging_outputs, self.get_criterion())\n del logging_outputs\n\n # extra warning for criterions that don't properly log a loss value\n if \"loss\" not in agg:\n if \"loss\" not in self._warn_once:\n self._warn_once.add(\"loss\")\n logger.warning(\n \"Criterion.reduce_metrics did not log a 'loss' value, \"\n \"which may break some functionality\"\n )\n metrics.log_scalar(\"loss\", -1)\n\n # support legacy interface\n if self.tpu:\n logging_output = {}\n else:\n logging_output = agg.get_smoothed_values()\n logging_output[\"sample_size\"] = sample_size\n for key_to_delete in [\"ppl\", \"wps\", \"wpb\", \"bsz\"]:\n if key_to_delete in logging_output:\n del logging_output[key_to_delete]\n return logging_output\n\n def _check_xla_compilation(self):\n import torch_xla.debug.metrics as met\n\n compile_stats = met.metric_data(\"CompileTime\")\n if compile_stats is None:\n return\n num_xla_compiles = compile_stats[0]\n if num_xla_compiles > self._num_xla_compiles:\n logger.warning(\n \"XLA compilation detected on device #{}; too many of these can lead \"\n \"to slow training, but we expect a few in the beginning\".format(\n self.cfg.distributed_training.distributed_rank\n )\n )\n self._num_xla_compiles = num_xla_compiles\n\n def _xla_markstep_and_send_to_cpu(self, data=None):\n import torch_xla.core.xla_model as xm\n xm.mark_step()\n if data is not None:\n from fairseq.utils import xla_device_to_cpu\n return xla_device_to_cpu(data)\n\n\ndef _catalog_shared_params(module, memo=None, prefix=\"\"):\n if memo is None:\n first_call = True\n memo = {}\n else:\n first_call = False\n for name, param in module._parameters.items():\n param_prefix = prefix + (\".\" if prefix else \"\") + name\n if param not in memo:\n memo[param] = []\n memo[param].append(param_prefix)\n for name, m in module._modules.items():\n if m is None:\n continue\n submodule_prefix = prefix + (\".\" if prefix else \"\") + name\n _catalog_shared_params(m, memo, submodule_prefix)\n if first_call:\n return [x for x in memo.values() if len(x) > 1]\n\n\ndef _get_module_by_path(module, path):\n path = path.split(\".\")\n for name in path:\n module = getattr(module, name)\n return module\n\n\ndef _set_module_by_path(module, path, value):\n path = path.split(\".\")\n for name in path[:-1]:\n module = getattr(module, name)\n setattr(module, path[-1], value)\n" ]
[ [ "torch.cuda.empty_cache", "torch.cuda.memory_summary", "torch.zeros_like", "torch.no_grad", "torch.autograd.profiler.record_function", "torch.tensor", "torch.cuda.device_count", "torch.distributed.is_initialized", "torch.cuda.reset_peak_memory_stats", "torch.cuda.is_available", "torch.is_tensor", "torch.abs", "torch.cuda.get_device_capability", "torch.cuda.max_memory_allocated", "torch.cuda.DoubleTensor", "torch.device", "torch.isfinite" ] ]
xixiobba/MVP-Net
[ "07bf00390080670b5d9a643b99f633419322a1ec" ]
[ "lib/modeling/rpn_heads.py" ]
[ "from torch import nn\nfrom torch.nn import init\nimport torch.nn.functional as F\n\nfrom core.config import cfg\nfrom modeling.generate_anchors import generate_anchors\nfrom modeling.generate_proposals import GenerateProposalsOp\nfrom modeling.generate_proposal_labels import GenerateProposalLabelsOp\nimport modeling.FPN as FPN\nimport utils.net as net_utils\nfrom model.utils.loss import focal_loss\n\n\n# ---------------------------------------------------------------------------- #\n# RPN and Faster R-CNN outputs and losses\n# ---------------------------------------------------------------------------- #\n\ndef generic_rpn_outputs(dim_in, spatial_scale_in):\n \"\"\"Add RPN outputs (objectness classification and bounding box regression)\n to an RPN model. Abstracts away the use of FPN.\n \"\"\"\n if cfg.FPN.FPN_ON:\n # Delegate to the FPN module\n return FPN.fpn_rpn_outputs(dim_in, spatial_scale_in)\n else:\n # Not using FPN, add RPN to a single scale\n return single_scale_rpn_outputs(dim_in, spatial_scale_in)\n\n\ndef generic_rpn_losses(*inputs, **kwargs):\n \"\"\"Add RPN losses. Abstracts away the use of FPN.\"\"\"\n if cfg.FPN.FPN_ON:\n return FPN.fpn_rpn_losses(*inputs, **kwargs)\n else:\n return single_scale_rpn_losses(*inputs, **kwargs)\n\n\nclass single_scale_rpn_outputs(nn.Module):\n \"\"\"Add RPN outputs to a single scale model (i.e., no FPN).\"\"\"\n def __init__(self, dim_in, spatial_scale):\n super().__init__()\n self.dim_in = dim_in\n self.dim_out = dim_in if cfg.RPN.OUT_DIM_AS_IN_DIM else cfg.RPN.OUT_DIM\n anchors = generate_anchors(\n stride=1. / spatial_scale,\n sizes=cfg.RPN.SIZES,\n aspect_ratios=cfg.RPN.ASPECT_RATIOS)\n num_anchors = anchors.shape[0]\n\n # RPN hidden representation\n self.RPN_conv = nn.Conv2d(self.dim_in, self.dim_out, 3, 1, 1)\n # Proposal classification scores\n self.n_score_out = num_anchors * 2 if cfg.RPN.CLS_ACTIVATION == 'softmax' \\\n else num_anchors\n self.RPN_cls_score = nn.Conv2d(self.dim_out, self.n_score_out, 1, 1, 0)\n # Proposal bbox regression deltas\n self.RPN_bbox_pred = nn.Conv2d(self.dim_out, num_anchors * 4, 1, 1, 0)\n\n self.RPN_GenerateProposals = GenerateProposalsOp(anchors, spatial_scale)\n self.RPN_GenerateProposalLabels = GenerateProposalLabelsOp()\n\n self._init_weights()\n\n def _init_weights(self):\n init.normal_(self.RPN_conv.weight, std=0.01)\n init.constant_(self.RPN_conv.bias, 0)\n init.normal_(self.RPN_cls_score.weight, std=0.01)\n init.constant_(self.RPN_cls_score.bias, 0)\n init.normal_(self.RPN_bbox_pred.weight, std=0.01)\n init.constant_(self.RPN_bbox_pred.bias, 0)\n\n def detectron_weight_mapping(self):\n detectron_weight_mapping = {\n 'RPN_conv.weight': 'conv_rpn_w',\n 'RPN_conv.bias': 'conv_rpn_b',\n 'RPN_cls_score.weight': 'rpn_cls_logits_w',\n 'RPN_cls_score.bias': 'rpn_cls_logits_b',\n 'RPN_bbox_pred.weight': 'rpn_bbox_pred_w',\n 'RPN_bbox_pred.bias': 'rpn_bbox_pred_b'\n }\n orphan_in_detectron = []\n return detectron_weight_mapping, orphan_in_detectron\n\n def forward(self, x, im_info, roidb=None):\n \"\"\"\n x: feature maps from the backbone network. 
(Variable)\n            im_info: (CPU Variable)\n            roidb: (list of ndarray)\n        \"\"\"\n        rpn_conv = F.relu(self.RPN_conv(x), inplace=True)\n\n        rpn_cls_logits = self.RPN_cls_score(rpn_conv)\n\n        rpn_bbox_pred = self.RPN_bbox_pred(rpn_conv)\n\n        return_dict = {\n            'rpn_cls_logits': rpn_cls_logits, 'rpn_bbox_pred': rpn_bbox_pred}\n\n        if not self.training or cfg.MODEL.FASTER_RCNN:\n            # Proposals are needed during:\n            #  1) inference (== not model.train) for RPN only and Faster R-CNN\n            #  OR\n            #  2) training for Faster R-CNN\n            # Otherwise (== training for RPN only), proposals are not needed\n            if cfg.RPN.CLS_ACTIVATION == 'softmax':\n                B, C, H, W = rpn_cls_logits.size()\n                rpn_cls_prob = F.softmax(\n                    rpn_cls_logits.view(B, 2, C // 2, H, W), dim=1)\n                rpn_cls_prob = rpn_cls_prob[:, 1].squeeze(dim=1)\n            else:\n                rpn_cls_prob = F.sigmoid(rpn_cls_logits)\n\n            rpn_rois, rpn_rois_prob = self.RPN_GenerateProposals(\n                rpn_cls_prob, rpn_bbox_pred, im_info)\n\n            return_dict['rpn_rois'] = rpn_rois\n            return_dict['rpn_roi_probs'] = rpn_rois_prob\n\n        if cfg.MODEL.FASTER_RCNN:\n            if self.training:\n                # Add op that generates training labels for in-network RPN proposals\n                blobs_out = self.RPN_GenerateProposalLabels(rpn_rois, roidb, im_info)\n                return_dict.update(blobs_out)\n            else:\n                # Alias rois to rpn_rois for inference\n                return_dict['rois'] = return_dict['rpn_rois']\n\n        return return_dict\n\n\ndef single_scale_rpn_losses(\n        rpn_cls_logits, rpn_bbox_pred,\n        rpn_labels_int32_wide, rpn_bbox_targets_wide,\n        rpn_bbox_inside_weights_wide, rpn_bbox_outside_weights_wide):\n    \"\"\"Add losses for a single scale RPN model (i.e., no FPN).\"\"\"\n    h, w = rpn_cls_logits.shape[2:]\n    rpn_labels_int32 = rpn_labels_int32_wide[:, :, :h, :w]  # -1 means ignore\n    h, w = rpn_bbox_pred.shape[2:]\n    rpn_bbox_targets = rpn_bbox_targets_wide[:, :, :h, :w]\n    rpn_bbox_inside_weights = rpn_bbox_inside_weights_wide[:, :, :h, :w]\n    rpn_bbox_outside_weights = rpn_bbox_outside_weights_wide[:, :, :h, :w]\n\n    #fg_num = (rpn_labels_int32_wide==1).data.sum()\n    #bg_num = (rpn_labels_int32_wide==0).data.sum()\n    #print(\"RCNN training fg/bg: %d/%d\"%(fg_num, bg_num))\n\n    if cfg.RPN.CLS_ACTIVATION == 'softmax':\n        B, C, H, W = rpn_cls_logits.size()\n        rpn_cls_logits = rpn_cls_logits.view(\n            B, 2, C // 2, H, W).permute(0, 2, 3, 4, 1).contiguous().view(-1, 2)\n        rpn_labels_int32 = rpn_labels_int32.contiguous().view(-1).long()\n        # the loss is averaged over non-ignored targets\n        if cfg.TRAIN.FOCAL_LOSS:\n            loss_rpn_cls = focal_loss(rpn_cls_logits, rpn_labels_int32, softmax=False, size_average=False)\n        else:\n            loss_rpn_cls = F.cross_entropy(\n                rpn_cls_logits, rpn_labels_int32, ignore_index=-1)\n    else:\n        weight = (rpn_labels_int32 >= 0).float()\n        if cfg.TRAIN.FOCAL_LOSS:\n            loss_rpn_cls = focal_loss(\n                rpn_cls_logits.view(-1, 1), rpn_labels_int32.contiguous().view(-1, 1).float(), weight.view(-1, 1).float(), softmax=True, size_average=False)\n        else:\n            loss_rpn_cls = F.binary_cross_entropy_with_logits(\n                rpn_cls_logits, rpn_labels_int32.float(), weight, size_average=False)\n            loss_rpn_cls /= weight.sum()\n\n    loss_rpn_bbox = net_utils.smooth_l1_loss(\n        rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights,\n        beta=1/9)\n\n    return loss_rpn_cls, loss_rpn_bbox\n" ]
[ [ "torch.nn.functional.sigmoid", "torch.nn.init.constant_", "torch.nn.init.normal_", "torch.nn.Conv2d", "torch.nn.functional.cross_entropy" ] ]
sebaslherrera/holbertonschool-machine_learning
[ "a4c09230688700aee199f4099de32261104918be" ]
[ "math/0x00-linear_algebra/9-let_the_butcher_slice_it.py" ]
[ "#!/usr/bin/env python3\nimport numpy as np\nmatrix = np.array([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12],\n [13, 14, 15, 16, 17, 18], [19, 20, 21, 22, 23, 24]])\nmat1 = matrix[1:3]\nmat2 = matrix[:, 2:4]\nmat3 = matrix[1:, 3:]\nprint(\"The middle two rows of the matrix are:\\n{}\".format(mat1))\nprint(\"The middle two columns of the matrix are:\\n{}\".format(mat2))\nprint(\"The bottom-right, square, 3x3 matrix is:\\n{}\".format(mat3))\n" ]
[ [ "numpy.array" ] ]
KuangHaofei/pytorch-deepFEPE
[ "012651c93f948cfd793cf8bba9670ab69abc0e04" ]
[ "deepFEPE/utils/plot_tools.py" ]
[ "import argparse\nimport time\nimport csv\nimport yaml\nimport os\nimport logging\nfrom pathlib import Path\n\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom tensorboardX import SummaryWriter\nimport cv2\nimport matplotlib.pyplot as plt\n\n\nclass plot_results(object):\n def __init__(self, frame_list=[100], mode='base'):\n # frame_list = [0, 100, 200, 300]\n # frame_list = [100, 700, 1200]\n # frame_list = [100]\n self.frame_list = frame_list\n print(f\"mode = {mode}\")\n self.get_image_names(mode=mode)\n pass\n\n def get_image_names(self, mode='base'):\n frame_list = self.frame_list\n plot_folder = \"plots/\"\n image_name = None\n if mode == 'base':\n prefix = [\"Si-Df-k\", \"Sp-Df-fp-end-k\"]\n plot_name = \"mask_conf_\" # 'corr_all_'\n # image_name = [f\"{plot_folder}{plot_name}{prefix}{i:06}_{(i+1):06}.png\" for i in frame_list]\n elif mode == 'good' or mode == 'bad':\n prefix = [f\"Si-Df-fp-k_{mode}\", f\"Sp-Df-fp-end-k_{mode}\"]\n plot_name = \"mask_conf_\" # \"mask_conf_\" # 'corr_all_'\n elif mode == 'freeze':\n print(f\"freeze!\")\n iter_list = [0, 400, 1000]\n prefix_base = \"Sp-Df-f-end-k-freezeDf\"\n plot_name = 'corr_all_random_' # 'corr_all_', \"mask_conf_\" \"epi_dist_all_\" \"corr_all_random_\"\n print(f\"plot_name: {plot_name}\")\n # prefix = [f'{prefix_base}_{iter/1000}k_' for iter in iter_list] # 'Sp-Df-fp-end-k'\n prefix = [f'{prefix_base}_s{frame_list[0]}_{iter/1000}k' for iter in iter_list] # 'Sp-Df-fp-end-k'\n image_name = [f\"{plot_folder}{plot_name}{p}.png\" for p in prefix]\n # prefix = f'Sp-Df-f-end-k-freezeDf_s{j}_{iter/1000}k'\n # image_name = [\n # f\"{plot_folder}{plot_name}{pre}{i:06}_{(i+1):06}.png\"\n # for i in frame_list\n # for pre in prefix\n # ]\n if image_name is None:\n image_name = [\n f\"{plot_folder}{plot_name}{pre}_{i}.png\"\n for i in frame_list\n for pre in prefix\n ]\n self.prefix = prefix\n self.image_name = image_name\n self.image_data = []\n self.plot_name = plot_name\n print(image_name) \n\n def __len__(self):\n return len(self.image_name)\n\n def read_images(self):\n image_data = []\n image_name = self.image_name\n for i, file in enumerate(image_name):\n img = cv2.imread(file)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n image_data.append(img)\n print(f\"read {i}: {file}\")\n # plt.imshow(img)\n # plt.show()\n self.image_data = image_data\n pass\n\n def plot_images(\n self, row=2, col=2, col_labels=[\"Baseline - Si-Df-fp\", \"Ours - Sp-Df-fp-end\"],\n save=True,\n figsize=(48,12),\n ext='pdf'\n ):\n ## create subgraph for combinations\n # row, col = 2, 2\n img_num = row * col\n assert self.__len__() >= img_num\n image_data = self.image_data\n\n f, axarr = plt.subplots(row, col, figsize=figsize)\n # f, axarr = plt.subplots(row, col, figsize=(48, 12))\n\n axarr = axarr.reshape(-1, col)\n for i in range(img_num):\n print(f\"axarr: {axarr.shape}, i= {i}\")\n axarr[int(i / col), int(i % col)].imshow(image_data[i])\n axarr[int(i / col), int(i % col)].axis(\"off\")\n # axarr[i/2,i%2].imshow(imaget(_datas[1])\n # axarr[1,0].imshow(image_datas[2])\n # axarr[1,1].imshow(image_datas[3])\n\n for ax, col_name in zip(axarr[0], col_labels):\n ax.set_title(col_name, fontsize=figsize[0])\n\n f.tight_layout()\n # f.suptitle(f'{self.prefix}', fontsize=12)\n savefile = f\"{self.plot_name}_{str('_').join(self.prefix)}_{str('_').join([str(f) for f in self.frame_list])}\"\n if save:\n if ext == 'pdf':\n file = f\"plots/{savefile}.pdf\"\n plt.savefig(file, bbox_inches=\"tight\")\n else:\n file = f\"plots/{savefile}.png\"\n plt.savefig(file, dpi=300, 
bbox_inches=\"tight\")\n logging.info(f\"save image: {savefile}\")\n print(f\"save image: {file}\")\n else:\n print(f\"not saved!!\")\n # logging.info(f\"save image: {file}\")\n plt.show()\n\nif __name__ == \"__main__\":\n plot_helper = plot_class()\n plot_helper.read_images()\n # plot_helper.plot_images(row=3,col=2)\n plot_helper.plot_images(row=1,col=2)\n\n\n# class plot_class(object):\n# def __init__(self):\n# # frame_list = [0, 100, 200, 300]\n# frame_list = [100, 700, 1200]\n# # frame_list = [100]\n# prefix = ['Si-Df-k', 'Sp-Df-fp-end-k']\n# plot_folder = 'plots/'\n# plot_name = 'mask_conf_' # 'corr_all_'\n# # image_name = [f\"{plot_folder}{plot_name}{prefix}{i:06}_{(i+1):06}.png\" for i in frame_list]\n# image_name = [f\"{plot_folder}{plot_name}{pre}{i:06}_{(i+1):06}.png\" for i in frame_list for pre in prefix ]\n# self.frame_list = frame_list\n# self.prefix = prefix\n# self.image_name = image_name\n# self.image_data = []\n# print(image_name) \n# pass\n# def __len__(self):\n# return len(self.image_name)\n \n# def read_images(self):\n# image_data = []\n# image_name = self.image_name\n# for i, file in enumerate(image_name):\n# img = cv2.imread(file)\n# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n# image_data.append(img)\n# print(f\"read {i}: {file}\")\n# # plt.imshow(img)\n# # plt.show()\n# self.image_data = image_data\n# pass\n \n# def plot_images(self, row=2, col=2, col_labels=['Baseline - Si-Df-fp', 'Ours - Sp-Df-fp-end']):\n# ## create subgraph for combinations\n# # row, col = 2, 2\n# img_num = row*col\n# assert self.__len__() >= img_num\n# image_data = self.image_data\n \n# f, axarr = plt.subplots(row, col, figsize=(48, 12))\n# # f, axarr = plt.subplots(row, col, figsize=(48, 12))\n \n# axarr = axarr.reshape(-1, col)\n# for i in range(img_num):\n# print(f'axarr: {axarr.shape}, i= {i}')\n# axarr[int(i/col),int(i%col)].imshow(image_data[i])\n# axarr[int(i/col),int(i%col)].axis('off')\n# # axarr[i/2,i%2].imshow(imaget(_datas[1])\n# # axarr[1,0].imshow(image_datas[2])\n# # axarr[1,1].imshow(image_datas[3])\n\n \n# for ax, col_name in zip(axarr[0], col_labels):\n# ax.set_title(col_name)\n \n# f.tight_layout()\n# # f.suptitle(f'{self.prefix}', fontsize=12)\n# savefile = f\"{str('_').join(self.prefix)}_{str('_').join([str(f) for f in self.frame_list])}\"\n# file = f\"plots/{savefile}.png\"\n# # logging.info(f\"save image: {file}\")\n# print(f\"save image: {file}\")\n# plt.show() \n\n\n\n# def plot_imgs(imgs, titles=None, cmap='brg', ylabel='', normalize=False, ax=None, dpi=100):\n# n = len(imgs)\n# if not isinstance(cmap, list):\n# cmap = [cmap]*n\n# if ax is None:\n# fig, ax = plt.subplots(1, n, figsize=(6*n, 6), dpi=dpi)\n# if n == 1:\n# ax = [ax]\n# else:\n# if not isinstance(ax, list):\n# ax = [ax]\n# assert len(ax) == len(imgs)\n# for i in range(n):\n# if imgs[i].shape[-1] == 3:\n# imgs[i] = imgs[i][..., ::-1] # BGR to RGB\n# ax[i].imshow(imgs[i], cmap=plt.get_cmap(cmap[i]),\n# vmin=None if normalize else 0,\n# vmax=None if normalize else 1)\n# if titles:\n# ax[i].set_title(titles[i])\n# ax[i].get_yaxis().set_ticks([])\n# ax[i].get_xaxis().set_ticks([])\n# for spine in ax[i].spines.values(): # remove frame\n# spine.set_visible(False)\n# ax[0].set_ylabel(ylabel)\n# plt.tight_layout()\n\n\n# # from utils.draw import img_overlap\n# def img_overlap(img_r, img_g, img_gray): # img_b repeat\n# img = np.concatenate((img_gray, img_gray, img_gray), axis=0)\n# img[0, :, :] += img_r[0, :, :]\n# img[1, :, :] += img_g[0, :, :]\n# img[img > 1] = 1\n# img[img < 0] = 0\n# return img\n\n# def 
draw_keypoints(img, corners, color=(0, 255, 0), radius=3, s=3):\n# '''\n\n# :param img:\n# image:\n# numpy [H, W]\n# :param corners:\n# Points\n# numpy [N, 2]\n# :param color:\n# :param radius:\n# :param s:\n# :return:\n# overlaying image\n# numpy [H, W]\n# '''\n# img = np.repeat(cv2.resize(img, None, fx=s, fy=s)[..., np.newaxis], 3, -1)\n# for c in np.stack(corners).T:\n# # cv2.circle(img, tuple(s * np.flip(c, 0)), radius, color, thickness=-1)\n# cv2.circle(img, tuple((s * c[:2]).astype(int)), radius, color, thickness=-1)\n# return img\n\n# # def draw_keypoints(img, corners, color=(0, 255, 0), radius=3, s=3):\n# # '''\n\n# # :param img:\n# # np (H, W)\n# # :param corners:\n# # np (3, N)\n# # :param color:\n# # :param radius:\n# # :param s:\n# # :return:\n# # '''\n# # img = np.repeat(cv2.resize(img, None, fx=s, fy=s)[..., np.newaxis], 3, -1)\n# # for c in np.stack(corners).T:\n# # # cv2.circle(img, tuple(s * np.flip(c, 0)), radius, color, thickness=-1)\n# # cv2.circle(img, tuple((s*c[:2]).astype(int)), radius, color, thickness=-1)\n# # return img\n\n# def draw_matches(rgb1, rgb2, match_pairs, filename='matches.png', show=False):\n# '''\n\n# :param rgb1:\n# image1\n# numpy (H, W)\n# :param rgb2:\n# image2\n# numpy (H, W)\n# :param match_pairs:\n# numpy (keypoiny1 x, keypoint1 y, keypoint2 x, keypoint 2 y)\n# :return:\n# None\n# '''\n# from matplotlib import pyplot as plt\n\n# h1, w1 = rgb1.shape[:2]\n# h2, w2 = rgb2.shape[:2]\n# canvas = np.zeros((max(h1, h2), w1 + w2, 3), dtype=rgb1.dtype)\n# canvas[:h1, :w1] = rgb1[:,:,np.newaxis]\n# canvas[:h2, w1:] = rgb2[:,:,np.newaxis]\n# # fig = plt.figure(frameon=False)\n# fig = plt.imshow(canvas)\n\n# xs = match_pairs[:, [0, 2]]\n# xs[:, 1] += w1\n# ys = match_pairs[:, [1, 3]]\n\n# alpha = 1\n# sf = 5\n# lw = 0.5\n# # markersize = 1\n# markersize = 2\n\n# plt.plot(\n# xs.T, ys.T,\n# alpha=alpha,\n# linestyle=\"-\",\n# linewidth=lw,\n# aa=False,\n# marker='o',\n# markersize=markersize,\n# fillstyle='none',\n# color=[0.0, 0.8, 0.0],\n# );\n# plt.tight_layout()\n# plt.savefig(filename, dpi=300, bbox_inches='tight')\n# print('#Matches = {}'.format(len(match_pairs)))\n# if show:\n# plt.show()\n\n# # from utils.draw import draw_matches_cv\n# def draw_matches_cv(data):\n# keypoints1 = [cv2.KeyPoint(p[1], p[0], 1) for p in data['keypoints1']]\n# keypoints2 = [cv2.KeyPoint(p[1], p[0], 1) for p in data['keypoints2']]\n# inliers = data['inliers'].astype(bool)\n# matches = np.array(data['matches'])[inliers].tolist()\n# def to3dim(img):\n# if img.ndim == 2:\n# img = img[:, :, np.newaxis]\n# return img\n# img1 = to3dim(data['image1'])\n# img2 = to3dim(data['image2'])\n# img1 = np.concatenate([img1, img1, img1], axis=2)\n# img2 = np.concatenate([img2, img2, img2], axis=2)\n# return cv2.drawMatches(img1, keypoints1, img2, keypoints2, matches,\n# None, matchColor=(0,255,0), singlePointColor=(0, 0, 255))\n\n\n# def drawBox(points, img, offset=np.array([0,0]), color=(0,255,0)):\n# # print(\"origin\", points)\n# offset = offset[::-1]\n# points = points + offset\n# points = points.astype(int)\n# for i in range(len(points)):\n# img = img + cv2.line(np.zeros_like(img),tuple(points[-1+i]), tuple(points[i]), color,5)\n# return img\n" ]
[ [ "matplotlib.pyplot.savefig", "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ] ]
XiangLiK/cv_course
[ "da7c2318fd4128bbdab96db26ddbb2524f37d0a0", "da7c2318fd4128bbdab96db26ddbb2524f37d0a0" ]
[ "chapter_03/example-3_1.py", "chapter_07e/face_detection/config_farm/acc_model.py" ]
[ "#-*-coding:utf-8-*-\r\n# date:2020-03-28\r\n# Author: xiang li\r\n\r\nimport torch # 加载torch库\r\nimport numpy as np # 加载Numpy库\r\nif __name__ == \"__main__\":\r\n print(torch.__version__)# 查看 torch 版本\r\n print('-----------------------')\r\n y = torch.rand(2,3)# 随机矩阵\r\n print(y)\r\n print(y.size())\r\n print('-----------------------')\r\n print(torch.zeros(2,2))#全0矩阵\r\n print('-----------------------')\r\n print(torch.ones(2,2))#全1矩阵\r\n print('-----------------------')\r\n print(torch.eye(3,3))# 单位矩阵\r\n print('-----------------------')\r\n print(torch.rand_like(input = y, dtype = torch.double))# 输出和input矩阵相同size的随机矩阵\r\n", "import torch\r\nimport torch.nn as nn\r\nimport torchvision\r\nimport time\r\nimport numpy as np\r\nimport sys\r\n\r\n\r\n\r\ndef get_model_op(model_,print_flag = False):\r\n print('/********************* modules *******************/')\r\n op_dict = {}\r\n idx = 0\r\n for m in model_.modules():\r\n idx += 1\r\n if isinstance(m, nn.Conv2d):\r\n if 'Conv2d' not in op_dict.keys():\r\n op_dict['Conv2d'] = 1\r\n else:\r\n op_dict['Conv2d'] += 1\r\n if print_flag:\r\n print('{}) {}'.format(idx,m))\r\n pass\r\n elif isinstance(m, nn.BatchNorm2d):\r\n if 'BatchNorm2d' not in op_dict.keys():\r\n op_dict['BatchNorm2d'] = 1\r\n else:\r\n op_dict['BatchNorm2d'] += 1\r\n if print_flag:\r\n print('{}) {}'.format(idx,m))\r\n pass\r\n elif isinstance(m, nn.Linear):\r\n if 'Linear' not in op_dict.keys():\r\n op_dict['Linear'] = 1\r\n else:\r\n op_dict['Linear'] += 1\r\n if print_flag:\r\n print('{}) {}'.format(idx,m))\r\n pass\r\n elif isinstance(m, nn.Sequential):\r\n if print_flag:\r\n print('*******************{}) {}'.format(idx,m))\r\n for n in m:\r\n if print_flag:\r\n print('{}) {}'.format(idx,n))\r\n if 'Conv2d' not in op_dict.keys():\r\n op_dict['Conv2d'] = 1\r\n else:\r\n op_dict['Conv2d'] += 1\r\n if 'BatchNorm2d' not in op_dict.keys():\r\n op_dict['BatchNorm2d'] = 1\r\n else:\r\n op_dict['BatchNorm2d'] += 1\r\n if 'Linear' not in op_dict.keys():\r\n op_dict['Linear'] = 1\r\n else:\r\n op_dict['Linear'] += 1\r\n if 'ReLU6' not in op_dict.keys():\r\n op_dict['ReLU6'] = 1\r\n else:\r\n op_dict['ReLU6'] += 1\r\n pass\r\n elif isinstance(m, nn.ReLU6):\r\n if print_flag:\r\n print('{}) {}'.format(idx,m))\r\n if 'ReLU6' not in op_dict.keys():\r\n op_dict['ReLU6'] = 1\r\n else:\r\n op_dict['ReLU6'] += 1\r\n pass\r\n elif isinstance(m, nn.Module):\r\n if print_flag:\r\n print('{}) {}'.format(idx,m))\r\n for n in m.modules():\r\n if isinstance(n, nn.Conv2d):\r\n if print_flag:\r\n print('{}) {}'.format(idx,n))\r\n if 'Conv2d' not in op_dict.keys():\r\n op_dict['Conv2d'] = 1\r\n else:\r\n op_dict['Conv2d'] += 1\r\n if 'BatchNorm2d' not in op_dict.keys():\r\n op_dict['BatchNorm2d'] = 1\r\n else:\r\n op_dict['BatchNorm2d'] += 1\r\n if 'Linear' not in op_dict.keys():\r\n op_dict['Linear'] = 1\r\n else:\r\n op_dict['Linear'] += 1\r\n if 'ReLU6' not in op_dict.keys():\r\n op_dict['ReLU6'] = 1\r\n else:\r\n op_dict['ReLU6'] += 1\r\n pass\r\n pass\r\n\r\n else:\r\n if print_flag:\r\n print('{}) {}'.format(idx,m))\r\n pass\r\n\r\n # print('\\n/********************** {} ********************/\\n'.format(ops.network))\r\n for key in op_dict.keys():\r\n print(' operation - {} : {}'.format(key,op_dict[key]))\r\n\r\nclass DummyModule(nn.Module):\r\n def __init__(self):\r\n super(DummyModule, self).__init__()\r\n\r\n def forward(self, x):\r\n return x\r\n\r\ndef fuse(conv, bn):\r\n # https://tehnokv.com/posts/fusing-batchnorm-and-conv/\r\n with torch.no_grad():\r\n # init\r\n if 
isinstance(conv, nn.Conv2d):\r\n fusedconv = torch.nn.Conv2d(conv.in_channels,\r\n conv.out_channels,\r\n kernel_size=conv.kernel_size,\r\n stride=conv.stride,\r\n padding=conv.padding,\r\n bias=True)\r\n elif isinstance(conv, nn.ConvTranspose2d): # not supprot nn.ConvTranspose2d\r\n fusedconv = nn.ConvTranspose2d(\r\n conv.in_channels,\r\n conv.out_channels,\r\n kernel_size=conv.kernel_size,\r\n stride=conv.stride,\r\n padding=conv.padding,\r\n output_padding=conv.output_padding,\r\n bias=True)\r\n else:\r\n print(\"error\")\r\n exit()\r\n\r\n # prepare filters\r\n w_conv = conv.weight.clone().view(conv.out_channels, -1)\r\n w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))\r\n fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))\r\n\r\n # prepare spatial bias\r\n if conv.bias is not None:\r\n b_conv = conv.bias\r\n #b_conv = conv.bias.mul(bn.weight.div(torch.sqrt(bn.running_var + bn.eps))) # maybe, you should this one ?\r\n else:\r\n b_conv = torch.zeros(conv.weight.size(0))\r\n b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))\r\n fusedconv.bias.copy_(b_conv + b_bn)\r\n\r\n return fusedconv\r\n\r\n\r\ndef fuse_module(m):\r\n children = list(m.named_children())\r\n c = None\r\n cn = None\r\n\r\n for name, child in children:\r\n # print(\"name {}, child {}\".format(name, child))\r\n if isinstance(child, nn.BatchNorm2d) and c is not None:\r\n bc = fuse(c, child)\r\n m._modules[cn] = bc\r\n # print('DummyModule() : ',DummyModule())\r\n m._modules[name] = DummyModule()\r\n c = None\r\n elif isinstance(child, nn.Conv2d):\r\n c = child\r\n cn = name\r\n else:\r\n fuse_module(child)\r\n\r\ndef test_net(ops,m):\r\n\r\n use_cuda = torch.cuda.is_available()\r\n use_cpu = False\r\n if ops.force_cpu or use_cuda == False:\r\n p = torch.randn([1, 3, 256, 256])\r\n device = torch.device(\"cpu\")\r\n use_cpu = True\r\n else:\r\n p = torch.randn([1, 3, 256, 256]).cuda()\r\n device = torch.device(\"cuda:0\")\r\n\r\n count = 50\r\n time_org = []\r\n m_o = m.to(device)\r\n get_model_op(m_o)\r\n # print(m)\r\n for i in range(count):\r\n s1 = time.time()\r\n if use_cpu:\r\n o_output = m_o(p)\r\n else:\r\n o_output = m_o(p).cpu()\r\n s2 = time.time()\r\n time_org.append(s2 - s1)\r\n print(\"Original time: \", s2 - s1)\r\n print('------------------------------------>>>>')\r\n\r\n fuse_module(m.to(torch.device(\"cpu\")))\r\n\r\n # print(m)\r\n\r\n m_f = m.to(device)\r\n get_model_op(m_f)\r\n\r\n time_fuse = []\r\n for i in range(count):\r\n s1 = time.time()\r\n if use_cpu:\r\n f_output = m_f(p)\r\n else:\r\n f_output = m_f(p).cpu()\r\n s2 = time.time()\r\n time_fuse.append(s2 - s1)\r\n print(\"Fused time: \", s2 - s1)\r\n\r\n print(\"-\" * 50)\r\n print(\"org time:\", np.mean(time_org))\r\n print(\"fuse time:\", np.mean(time_fuse))\r\n for o in o_output:\r\n print(\"org size:\", o.size())\r\n for o in f_output:\r\n print(\"fuse size:\", o.size())\r\n for i in range(len(o_output)):\r\n assert o_output[i].size()==f_output[i].size()\r\n print(\"output[{}] max abs diff: {}\".format(i, (o_output[i] - f_output[i]).abs().max().item()))\r\n print(\"output[{}] MSE diff: {}\".format(i, nn.MSELoss()(o_output[i], f_output[i]).item()))\r\n\r\n\r\ndef acc_model(ops,m):\r\n print('\\n-------------------------------->>> before acc model')\r\n get_model_op(m)\r\n fuse_module(m)\r\n print('\\n-------------------------------->>> after acc model')\r\n get_model_op(m)\r\n\r\n return m\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import os\r\n 
import argparse\r\n from models.resnet import resnet18, resnet34, resnet50, resnet101, resnet152\r\n\r\n parser = argparse.ArgumentParser(description=' Acc Model')\r\n\r\n parser.add_argument('--network', type=str, default='resnet_101',\r\n help='Backbone network : resnet_18,resnet_34,resnet_50,resnet_101,resnet_152,mobilenetv2')\r\n parser.add_argument('--model', type=str, default = './resnet101/model_epoch-1300.pth',\r\n help = 'model') # 模型路径\r\n parser.add_argument('--input_shape', type=tuple , default = (1,3,256,256),\r\n help = 'input_shape') #\r\n parser.add_argument('--num_classes', type=int , default = 196,\r\n help = 'num_classes') # 模型输入图片颜色偏置\r\n parser.add_argument('--force_cpu', type=bool, default = False,\r\n help = 'force_cpu') # 前向推断硬件选择\r\n parser.add_argument('--GPUS', type=str, default = '0',\r\n help = 'GPUS') # GPU选择\r\n\r\n print('\\n/******************* {} ******************/\\n'.format(parser.description))\r\n #--------------------------------------------------------------------------\r\n ops = parser.parse_args()# 解析添加参数\r\n #--------------------------------------------------------------------------\r\n print('----------------------------------')\r\n\r\n unparsed = vars(ops) # parse_args()方法的返回值为namespace,用vars()内建函数化为字典\r\n for key in unparsed.keys():\r\n print('{} : {}'.format(key,unparsed[key]))\r\n\r\n os.environ['CUDA_VISIBLE_DEVICES'] = ops.GPUS\r\n #---------------------------------------------------------------- 构建 landmarks 模型\r\n if ops.network == 'resnet_18':\r\n model_=resnet18(num_classes=ops.num_classes, img_size=ops.input_shape[2])\r\n elif ops.network == 'resnet_34':\r\n model_=resnet34(num_classes=ops.num_classes, img_size=ops.input_shape[2])\r\n elif ops.network == 'resnet_50':\r\n model_=resnet50(num_classes=ops.num_classes, img_size=ops.input_shape[2])\r\n elif ops.network == 'resnet_101':\r\n model_=resnet101(num_classes=ops.num_classes, img_size=ops.input_shape[2])\r\n elif ops.network == 'resnet_152':\r\n model_=resnet152(num_classes=ops.num_classes, img_size=ops.input_shape[2])\r\n elif ops.network == 'mobilenetv2':\r\n model_=MobileNetV2(n_class =ops.num_classes, input_size=ops.input_shape[2])\r\n else:\r\n print('error no the struct model : {}'.format(ops.network))\r\n\r\n # 加载测试模型\r\n if os.access(ops.model,os.F_OK):# checkpoint\r\n chkpt = torch.load(ops.model, map_location=lambda storage, loc: storage)\r\n # chkpt = torch.load(ops.model)\r\n model_.load_state_dict(chkpt)\r\n print(' \\nload model : {}'.format(ops.model))\r\n\r\n model_.eval()\r\n test_net(ops,model_)\r\n" ]
[ [ "torch.ones", "torch.rand_like", "torch.rand", "torch.zeros", "torch.eye" ], [ "torch.load", "torch.nn.MSELoss", "torch.randn", "torch.no_grad", "torch.sqrt", "torch.mm", "torch.cuda.is_available", "torch.nn.Conv2d", "torch.device", "numpy.mean", "torch.nn.ConvTranspose2d" ] ]
samcom12/anuga_core
[ "f4378114dbf02d666fe6423de45798add5c42806" ]
[ "validation_tests/analytical_exact/transcritical_without_shock/analytical_without_shock.py" ]
[ "\"\"\"\r\nTranscritical flow over a bump without a shock.\r\nRef1: Houghton & Kasahara, Nonlinear shallow fluid flow over an isolated ridge.\r\nComm. Pure and Applied Math. DOI:10.1002/cpa.3160210103\r\n\r\nRef2: Delestre et al, 2012, SWASHES: a compilation of shallow water\r\nanalytic solutions..., Int J Numer Meth Fluids, DOI:10.1002/fld.3741\r\n\r\nSudi Mungkasi, ANU 2012\r\n\"\"\"\r\nfrom numpy import zeros, linspace\r\nfrom scipy.optimize import fsolve\r\nfrom pylab import plot, show\r\nfrom anuga import g\r\n\r\n\r\nq0 = 1.53 # This is the imposed momentum\r\nh_d = 0.66 # This is the water height downstream\r\n\r\ndef analytic_sol(x):\r\n def elevation(x):\r\n z_b = zeros(len(x))\r\n for i in range(len(x)):\r\n if (8.0 <= x[i] <= 12.0):\r\n z_b[i] = 0.2 - 0.05*(x[i]-10.0)**2.0\r\n else:\r\n z_b[i] = 0.0\r\n return z_b\r\n z = elevation(x)\r\n zM= max(z)\r\n\r\n def find_hM(hM): #to find the water height at the maxima of the bump\r\n return h_d**3 + (-q0**2/(2*g*hM**2)-hM-zM)*h_d**2 + q0**2/(2*g)\r\n hM = fsolve(find_hM, 0.5)\r\n\r\n def find_h(h): #to find the water height at every spatial point after hM is found\r\n return h**3 + (zb-q0**2/(2*g*hM**2)-hM-zM)*h**2 + q0**2/(2*g)\r\n h = zeros(len(x))\r\n for i in range(len(x)):\r\n zb = z[i]\r\n #h[i] = fsolve(find_h, 1.0)\r\n if x[i] < 10:\r\n h[i] = fsolve(find_h, 1.0)\r\n else:\r\n h[i] = fsolve(find_h, 0.4)\r\n return h, z\r\n\r\n##N = 401\r\n##L = 25.\r\n##x = linspace(0.0,L,N)\r\n##h,z=analytic_sol(x)\r\n##plot(x,h+z, x,z)\r\n##plot(x, 1.53/h)\r\n##show()\r\n" ]
[ [ "scipy.optimize.fsolve" ] ]
JaesikKim/HiG2Vec
[ "62803d421a29336d89d0a1336054b33672434fe3" ]
[ "evalGene/score_prediction_NN.py" ]
[ "import torch as th\nimport torch.nn as nn\nimport argparse\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom torch.utils.data import DataLoader, TensorDataset\nimport pandas as pd\nimport numpy as np\nimport copy\n\nclass Net(nn.Module):\n def __init__(self, dim):\n super(Net, self).__init__()\n self.main = nn.Sequential(\n nn.Linear(dim, int(dim/2)),\n nn.BatchNorm1d(int(dim/2)),\n nn.Dropout(),\n nn.ReLU(),\n nn.Linear(int(dim/2), 1)\n )\n def forward(self, x):\n out = self.main(x)\n return out.view(-1)\n\nclass Scheduler():\n def __init__(self, optimizer, init_lr, n_warmup, epochs):\n self.optim = optimizer\n self.n_warmup = n_warmup\n self.lr_multiplier = 0.1\n self.init_lr = init_lr\n self.total_epochs = epochs\n\n def zero_grad(self):\n self.optim.zero_grad()\n\n def step_and_update_lr(self, curr_eph):\n self.update_lr(curr_eph)\n self.optim.step()\n \n def update_lr(self, curr_eph):\n if curr_eph < self.n_warmup:\n lr = self.init_lr * self.lr_multiplier \n else:\n lr = self.init_lr * max(0.0,float(self.total_epochs-curr_eph))/float(max(1.0,self.total_epochs-self.n_warmup))\n for param_group in self.optim.param_groups:\n param_group['lr'] = lr\n\ndef load_data(samples, objects):\n x_ls = []\n y_ls = []\n for i in range(len(samples)):\n g1 = samples.iloc[i,0]\n g2 = samples.iloc[i,1]\n if g1 in objects and g2 in objects:\n g1i = objects.index(g1)\n g2i = objects.index(g2)\n x_ls.append([g1i, g2i])\n y_ls.append(samples.iloc[i,2])\n return np.array(x_ls), np.array(y_ls)\n\ndef map_to_vec(samples, embeddings):\n x_ls = []\n for i in range(len(samples)):\n x_ls.append(np.concatenate((embeddings[int(samples[i,0].item())], embeddings[int(samples[i,1].item())])).tolist())\n return th.FloatTensor(x_ls)\n \ndef main():\n parser = argparse.ArgumentParser(description='Predict protein interaction')\n parser.add_argument('-model', help='Embedding model', type=str)\n parser.add_argument('-dim', help='Embedding dimension', type=int)\n parser.add_argument('-dset', help='protein-protein interactions', type=str)\n parser.add_argument('-fout', help='Prediction output', type=str)\n parser.add_argument('-lr', help='Learning rate', type=float)\n parser.add_argument('-gpu', help='GPU id', type=int, default=0)\n parser.add_argument('-burnin', help='Epochs of burn in', type=int, default=20)\n parser.add_argument('-epochs', help='Number of epochs', type=int, default=200)\n parser.add_argument('-batchsize', help='Batchsize', type=int, default=50)\n parser.add_argument('-print_each', help='Print loss each n-th epoch', type=int, default=10)\n opt = parser.parse_args()\n\n # load embeddings\n if opt.model[-3:] == \"pth\":\n model = th.load(opt.model, map_location=\"cpu\")\n objects, embeddings = model['objects'], model['embeddings'].numpy()\n\n else:\n model = np.load(opt.embeddings, allow_pickle=True).item()\n objects, embeddings = model['objects'], model['embeddings']\n\n # dataset processing\n print(\"... 
load data ...\")\n if opt.dset[-3:] == \"tsv\":\n data = pd.read_csv(opt.dset, sep=\"\\t\")\n else:\n data = pd.read_csv(opt.dset)\n\n device = th.device('cuda:'+str(opt.gpu) if th.cuda.is_available() else 'cpu')\n X, y = load_data(data, objects)\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.25, random_state=42)\n\n net = Net(2*opt.dim).to(device)\n criterion = nn.MSELoss()\n optimizer = th.optim.Adam(net.parameters(), lr=opt.lr)\n scheduler = Scheduler(optimizer, opt.lr, opt.burnin, opt.epochs)\n\n # Dataloader\n train_dataset = TensorDataset(th.FloatTensor(X_train), th.FloatTensor(y_train.astype('float64')))\n val_dataset = TensorDataset(th.FloatTensor(X_val), th.FloatTensor(y_val.astype('float64')))\n test_dataset = TensorDataset(th.FloatTensor(X_test), th.FloatTensor(y_test.astype('float64')))\n train_loader = DataLoader(\n train_dataset,\n batch_size=opt.batchsize,\n shuffle=True,\n )\n val_loader = DataLoader(\n val_dataset,\n batch_size=opt.batchsize,\n shuffle=False,\n )\n test_loader = DataLoader(\n test_dataset,\n batch_size=opt.batchsize,\n shuffle=False,\n )\n\n # Train the model\n print(\"... Train Network ...\")\n opt_eph = 0\n opt_loss = np.inf\n opt_model_state_dict = net.state_dict()\n for epoch in range(opt.epochs):\n epoch_loss = []\n net.train()\n for samples, targets in train_loader:\n samples = map_to_vec(samples, embeddings)\n samples = samples.to(device)\n targets = targets.to(device)\n preds = net(samples)\n loss = criterion(preds, targets)\n scheduler.zero_grad()\n loss.backward()\n scheduler.step_and_update_lr(epoch)\n epoch_loss.append(loss.item())\n with th.no_grad():\n net.eval()\n val_loss = []\n for samples, labels in val_loader:\n samples = map_to_vec(samples, embeddings)\n samples = samples.to(device)\n labels = labels.to(device)\n preds = net(samples)\n loss = criterion(preds, labels)\n val_loss.append(loss.item())\n if np.mean(val_loss) < opt_loss:\n opt_loss = np.mean(val_loss)\n opt_eph = epoch\n opt_model_state_dict = copy.deepcopy(net.state_dict())\n\n if (epoch+1) % opt.print_each == 0:\n print(\"Epoch [{}/{}] Train Loss: {:.3f} Val Loss: {:.3f}\".format(epoch+1, opt.epochs, np.mean(epoch_loss), np.mean(val_loss)))\n\n # Save the test result\n net.load_state_dict(opt_model_state_dict)\n print(\"Optimal tuning: Epoch {}, Val Loss: {:.3f}\".format(opt_eph+1, opt_loss))\n y = []\n yhat = []\n with th.no_grad():\n net.eval()\n for samples, targets in test_loader:\n samples = map_to_vec(samples, embeddings)\n samples = samples.to(device)\n preds = net(samples)\n yhat += preds.cpu().tolist()\n y += targets.tolist()\n print(\"R2: \"+str(r2_score(y, yhat)))\n print(\"RMSE: \"+str(np.sqrt(mean_squared_error(y, yhat))))\n pd.DataFrame({'y' : y, 'yhat' : yhat}).to_csv(opt.fout, index=False)\n\n \nif __name__ == '__main__':\n main()\n\n" ]
[ [ "torch.utils.data.DataLoader", "torch.FloatTensor", "numpy.load", "torch.nn.MSELoss", "torch.load", "torch.nn.Dropout", "pandas.read_csv", "sklearn.metrics.mean_squared_error", "torch.no_grad", "pandas.DataFrame", "torch.cuda.is_available", "numpy.array", "sklearn.metrics.r2_score", "torch.nn.ReLU", "sklearn.model_selection.train_test_split", "numpy.mean" ] ]
RoosterQMonee/GTAG-PyAI
[ "1bef3cfc85da034f9129a008bd6c5e9114ce3cfd" ]
[ "Chat/commands/exe_command.py" ]
[ "import speech_recognition as sr\nfrom pydub import AudioSegment\nimport os\nfrom datetime import date\nimport sounddevice as sd\nfrom scipy.io.wavfile import write\nfrom random import choice, randint\nimport pyttsx3\nimport time\nimport webbrowser\nfrom playsound import playsound\n\n# Commands\n\nhello = [\"hi\", \"Hi\", \"hello\", \"Hello\", \"wsg\", \"Wsg\", \"WSG\", \"sup\", \"Sup\", \"hey\", \"Hey\", \"hi!\", \"Hi!\", \"hello!\",\n \"Hello!\", \"wsg!\", \"Wsg!\", \"WSG!\", \"sup!\", \"Sup!\", \"hey!\", \"Hey!\", \"hi :)\", \"Hi :)\", \"hello :)\", \"Hello :)\",\n \"wsg :)\", \"Wsg :)\", \"WSG :)\", \"sup :)\", \"Sup :)\", \"hey :)\", \"Hey :)\", \"hi! :)\", \"Hi! :)\", \"hello! :)\",\n \"Hello! :)\", \"wsg! :)\", \"Wsg! :)\", \"WSG! :)\", \"sup! :)\", \"Sup! :)\", \"hey! :)\", \"Hey! :)\", \"Ello\", \"ello\",\n \"'Ello\", \"'ello\"]\nbye = [\"bye\", \"Bye\", \"goodbye\", \"Goodbye\", \"good bye\", \"Good Bye\", \"see you\", \"See you\", \"later\", \"Later\", \"byee\",\n \"Byee\", \"byeee\", \"Byeee\"]\n\ninsult = [\"fucktard\", \"idot\", \"idiot\", \"dumbass\", \"motherfucker\", \"stupid\", \"gay\", \"fucker\", \"Fucktard\", \"Idot\",\n \"Idiot\", \"Dumbass\", \"Motherfucker\", \"Stupid\", \"Gay\", \"Fucker\" \"ur fat\", \"Ur fat\", \"your fat\", \"Your fat\",\n \"youre fat\", \"youre fat\", \"faggot\", \"retard\", \"bitch\", \"whore\", \"thot\", \"fat\", \"fatty\", \"ur gay\", \"Ur gay\",\n \"your gay\", \"youre gay\", \"Youre gay\", \"Fag\", \"fag\", \"Loser\", \"loser\"]\ncompliment = [\"gg\", \"good job\", \"nice\", \"great\", \"awesome\", \"good\", \"your hot\", \"ur hot\", \"youre hot\", \"youre awesome\",\n \"youre cool\", \"Nice\"]\n\nhi = [\"Sup\", \"Hello\", \"Hi\", \"good morning\", \"Good morning\", \"Good afternoon\", \"good afternoon\", \"good evening\",\n \"Good evening\"]\nhi2 = [\"Sup\", \"Hello\", \"Hi\"]\ngn = [\"Good night\", \"good night\"]\n\nyes = [\"yes\", \"Sure!\", \"sure\", \"of course\", \"yeah\"]\nno = [\"yeah no\", \"no\", \"heck no\"]\n\nthankYou = [\"thank you\", \"Thank you\", \"Thanks\", \"thanks\", \"Thank you\", \"thank you\", \"thx!\", \"Thx!\", \"Ty!\", \"ty!\",\n \"Thanks!\", \"thanks!\", \"Thank u\", \"thank u\"]\n\nstartTimer = [\"Can you start a timer\", \"Can you start a timer?\", \"can you start a timer\", \"can you start a timer?\",\n \"please start a timer\", \"Please start a timer\", \"timer start\", \"Timer start\", \"start timer\",\n \"Start timer\", \"can you please start a timer?\", \"can you start a timer please\",\n \"Can you start a timer please\", \"can you start a timer please?\", \"Can you start a timer please?\"]\nendTimer = [\"End the timer please\", \"end the timer please\", \"please end the timer\", \"Please end the timer\", \"timer end\",\n \"Timer end\", \"End timer\", \"end timer\", \"Stop the timer please\", \"stop the timer please\",\n \"please stop the timer\", \"Please stop the timer\", \"timer stop\", \"Timer stop\", \"Stop timer\", \"stop timer\"]\n\nhowMany = [\"How many\", \"how many\", \"how many?\", \"How many?\"]\ncanIJoin = [\"can i join\", \"Can i join\", \"Can i join?\", \"can i join?\", \"can I join\", \"Can I join\", \"Can I join?\",\n \"can I join?\"]\nhowAreYou = [\"How are you\", \"how are you\", \"How are you?\", \"how are you?\", \"How are you doing\", \"how are you doing\",\n \"how are you doing?\", \"How are you doing?\", \"How are u\", \"how are u\", \"How are u?\", \"how are u?\"]\nhowImDoing = [\"Ok so far\", \"Pretty good\", \"Good\", \"Great\"]\n\nwyd = [\"What are you doing\", \"what are you 
doing\", \"Wyd\", \"wyd\", \"WYD\", \"What are you doing?\", \"what are you doing?\",\n \"Wyd?\", \"wyd?\", \"WYD?\"]\nwid = [\"Smoking crack\", \"Coding\", \"Talking to people\", \"Nothing right now\", \"Playing piano\", \"Invading poland\",\n \"Making tacos\"]\n\ninvpoland = [\"wanna go invade poland\", \"Wanna go invade poland\", \"Wanna go invade poland?\", \"wanna go invade poland?\",\n \"want to go invade poland\"]\nily = [\"i love you\", \"I love you\", \"ily\", \"Ily\", \"ILY\", \"i <3 you\", \"I <3 you\", \"i <3 u\", \"i love u\", \"I love u\"]\nisFren = [\"Are you a friend\", \"are you a friend\", \"Are you a friend?\", \"are you a friend?\", \"Are you fren\",\n \"are you fren\", \"Are you a fren?\", \"are you a fren?\", \"Are you a fren\", \"are you a fren\", \"Are you a fren?\",\n \"are you a fren?\", \"Are you fren?\", \"are you fren?\", \"are you fren\", \"Are you fren\"]\n\nwhatCanYouDo = [\"What can you do\", \"what can you do\", \"what can you do?\", \"What can you do?\", \"What do you do?\",\n \"what do you do?\", \"cmd use\", \"Cmd use\", \"!use\"]\ntheDate = [\"What is the date\", \"what is the date\", \"what is today\", \"What is today\", \"can you please tell me the date\",\n \"Can you please tell me the date\", \"what is the date today\", \"What is the date today\", \"What is the date?\",\n \"what is the date?\", \"what is today?\", \"What is today?\", \"can you please tell me the date?\",\n \"Can you please tell me the date?\", \"what is the date today?\", \"What is the date today?\"]\n\n\n\nenable_speech = [\"enable speech\", \"speech enable\", \"speech on\"]\ndisable_speech = [\"disable speech\", \"speech disable\", \"speech off\"]\n\nenable_man = [\"enable manual\", \"manual enable\", \"manual on\"]\ndisable_man = [\"disable manual\", \"manual disable\", \"manual off\"]\n\nopenSite = [\"Open site\", \"open site\", \"website\", \"site\", \"site open\"]\n\nengine = pyttsx3.init()\nfs = 44100\nseconds = 3\n\nstrtTime = 0\nendtime = 0\n\nmanual = False\nspeech = True\nbot_name = ['ivan', 'hey ivan', 'boot ivan', 'help ivan', 'Yo ivan wake up']\ntoSay = ''\ncount = 0\nwindow = Tk()\n\ntry:\n os.remove('output.wav', 'transcript.wav')\nexcept:\n pass\n\nprint(\"Started!\")\n\n\ndef main():\n global count\n while count < 3:\n myrecording = sd.rec(int(seconds * fs), samplerate=fs, channels=2)\n sd.wait()\n write('output.wav', fs, myrecording) # Save as WAV file\n\n sound = AudioSegment.from_wav('output.wav')\n sound.export('transcript.wav', format=\"wav\")\n\n AUDIO_FILE = \"transcript.wav\"\n\n r = sr.Recognizer()\n with sr.AudioFile(AUDIO_FILE) as source:\n global speech\n global manual\n global strtTime\n global endtime\n global toSay\n audio = r.record(source)\n try:\n transcribed = r.recognize_google(audio)\n except:\n transcribed = \"Sorry, i did not understand\"\n engine.say(transcribed)\n engine.runAndWait()\n if manual == True:\n transcribed = input(\"Manual Command> \")\n\n try:\n print(\"Transcription: \" + transcribed)\n text = transcribed.lower()\n\n if text in theDate:\n toSay = (date.today())\n\n elif text in openSite:\n engine.say(\"What site do you want to open?\")\n engine.runAndWait()\n\n myrecording = sd.rec(int(seconds * fs), samplerate=fs, channels=2)\n sd.wait()\n write('output.wav', fs, myrecording) # Save as WAV file\n\n AUDIO_FILE = \"output.wav\"\n\n r = sr.Recognizer()\n with sr.AudioFile(AUDIO_FILE) as source:\n audio = r.record(source)\n speech = True\n try:\n transcribed = r.recognize_google(audio)\n except:\n transcribed = \"I couldn't 
understand what you said\"\n engine.say(transcribed)\n engine.runAndWait()\n\n print(transcribed)\n engine.say(\"Opening site.\")\n engine.runAndWait()\n\n if transcribed != \"I couldn't understand what you said\":\n url = f'https://www.{transcribed}.org'\n webbrowser.open(url)\n\n if transcribed.lower() != 'python':\n url = f'https://www.{transcribed}.com'\n webbrowser.open(url)\n\n elif text in compliment:\n toSay = choice(thankYou)\n\n elif text in whatCanYouDo:\n toSay = f\"I am {bot_name}. I can answer questions and run commands as you wish! Just remember i was made by a thirteen year old and a twelve year old\"\n\n elif text in isFren:\n toSay = \"Of course, im always here to help\"\n\n elif text in canIJoin:\n toSay = 'Sure'\n\n elif text in insult:\n toSay = \"You do know i don't get offended, right?\"\n\n elif text in enable_man:\n manual = True\n\n elif text in disable_man:\n manual = False\n\n elif text in ily:\n playsound('yugay.wav')\n\n elif text in wyd:\n toSay = choice(wid)\n\n elif text in thankYou:\n toSay = \"You're welcome\"\n\n elif text in howMany:\n toSay = str(randint(1, 50))\n\n elif text in howAreYou:\n toSay = choice(howImDoing)\n\n elif text in invpoland:\n toSay = \"Sure\"\n\n elif text in hi:\n toSay = choice(hi2)\n\n elif text in hello:\n toSay = choice(hi2)\n\n elif text in bye:\n toSay = choice(bye)\n\n elif text in startTimer:\n strtTime = time.time()\n toSay = 'Ok'\n\n elif text in endTimer:\n endtime = time.time()\n toSay = (f'Ok, Time is {str(endtime - strtTime)}')\n\n elif text in enable_speech:\n speech = True\n toSay = \"Ok\"\n\n elif text in disable_speech:\n speech = False\n toSay = \"Ok\"\n \n elif text == 'what is the time':\n t = time.localtime()\n current_time = time.strftime(\"%H:%M:%S\", t)\n print(current_time)\n\n else:\n toSay = \"Unknown command\"\n\n print(toSay)\n\n if speech == True:\n engine.say(toSay)\n engine.runAndWait()\n\n else:\n count += 1\n pass\n\n input(\"\")\n except:\n pass\n # input(\"Continue? \")\n count = 0\n\n\nwhile True:\n myrecording = sd.rec(int(seconds * fs), samplerate=fs, channels=2)\n sd.wait()\n write('output.wav', fs, myrecording)\n\n sound = AudioSegment.from_wav(\"output.wav\")\n sound.export(\"transcript.wav\", format=\"wav\")\n AUDIO_FILE = \"transcript.wav\"\n\n r = sr.Recognizer()\n with sr.AudioFile(AUDIO_FILE) as source:\n audio = r.record(source)\n speech = True\n try:\n transcribed = r.recognize_google(audio)\n except:\n pass\n\n try:\n if transcribed.lower() in bot_name and transcribed:\n print(\"Voice Activated\")\n engine.say(f\"Hello {os.getenv('USERNAME')}, how may i help\")\n engine.runAndWait()\n\n main()\n except:\n pass\n" ]
[ [ "scipy.io.wavfile.write" ] ]
snikhil17/mlzoomcamp
[ "dd04a23aa1ed506247adf9922c73069ad211044d" ]
[ "9_Serverless/lambda_function.py" ]
[ "import tflite_runtime.interpreter as tflite\r\nfrom PIL import Image\r\nfrom io import BytesIO\r\nfrom urllib import request\r\nimport numpy as np\r\n\r\n\r\n#import model\r\ninterpreter = tflite.Interpreter(model_path='cats-dogs-v2.tflite')\r\ninterpreter.allocate_tensors()\r\n\r\n\r\n# get input and output index\r\ninput_index = interpreter.get_input_details()[0]['index']\r\noutput_index = interpreter.get_output_details()[0]['index']\r\n\r\n\r\ndef download_image(url):\r\n with request.urlopen(url) as resp:\r\n buffer = resp.read()\r\n stream = BytesIO(buffer)\r\n img = Image.open(stream)\r\n return img\r\n\r\n\r\ndef prepare_image(img, target_size):\r\n if img.mode != 'RGB':\r\n img = img.convert('RGB')\r\n img = img.resize(target_size, Image.NEAREST)\r\n\r\n return img\r\n\r\n# url = 'https://upload.wikimedia.org/wikipedia/commons/1/18/Vombatus_ursinus_-Maria_Island_National_Park.jpg'\r\n\r\n\r\ndef preprocessor(img):\r\n x = np.array(img, dtype='float32') / 255\r\n\r\n return np.array([x])\r\n\r\n\r\ndef predict(url):\r\n\r\n img = download_image(url)\r\n img = prepare_image(img, (150, 150))\r\n X = preprocessor(img)\r\n\r\n interpreter.set_tensor(input_index, X)\r\n interpreter.invoke()\r\n preds = interpreter.get_tensor(output_index)\r\n\r\n float_predictions = preds[0].tolist()\r\n label = ['cat' if preds[0] < 0.5 else 'dog']\r\n\r\n return dict(zip(label, float_predictions))\r\n\r\n\r\ndef lambda_handler(event, context):\r\n url = event['url']\r\n result = predict(url)\r\n return result\r\n" ]
[ [ "numpy.array" ] ]
cmccully/pysalt
[ "d67262a42114bd359efc6ec23fc2d05be66d2025" ]
[ "saltred/saltmosaic.py" ]
[ "#!/usr/bin/env python\n\n# LICENSE\n# Copyright (c) 2014, South African Astronomical Observatory (SAAO)\n# All rights reserved. See License file for more details\n\n\"\"\"\nSALTMOSAIC is a task to apply the CCD geometric corrections to MEF style SALT\ndata.\n\nAuthor Version Date\n-----------------------------------------------\nMartin Still (SAAO) 0.1 16 Oct 2006\nSM Crawford (SAAO) 0.2 19 Mar 2006\n\nUpdates\n--------------------\n20120319 - Update to new error handling\n - Changed the mosaic to use the whole frame and not trim some data\n off\n20141111 - Added option to replace the masked regions\n\"\"\"\n\nimport os\nimport time\nimport numpy\nfrom scipy import ndimage as nd\nimport pyfits\nfrom pyraf import iraf\n\nfrom math import cos, sin, pi\nfrom scipy.ndimage import geometric_transform\n\nimport saltsafekey as saltkey\nimport saltsafeio as saltio\nimport saltsafestring as saltstring\nfrom saltsafelog import logging, history\n\nfrom salterror import SaltError\n\ndebug = True\n\n\n# -----------------------------------------------------------\n# core routine\n\ndef saltmosaic(images, outimages, outpref, geomfile, interp='linear',\n geotran=True, fill=False, cleanup=True, clobber=False,\n logfile=None, verbose=True):\n\n # Start the logging\n with logging(logfile, debug) as log:\n\n # Check the input images\n infiles = saltio.argunpack('Input', images)\n\n # create list of output files\n outfiles = saltio.listparse('Outfile', outimages, outpref, infiles, '')\n\n # verify that the input and output lists are the same length\n saltio.comparelists(infiles, outfiles, 'Input', 'output')\n\n # does CCD geometry definition file exist\n geomfilefile = geomfile.strip()\n saltio.fileexists(geomfile)\n\n gap = 0\n xshift = [0, 0]\n yshift = [0, 0]\n rotation = [0, 0]\n gap, xshift, yshift, rotation = saltio.readccdgeom(geomfile)\n\n # open each raw image file and apply the transformation to it\n for img, oimg in zip(infiles, outfiles):\n\n # open the structure\n struct = saltio.openfits(img)\n\n # create the mosaic\n ostruct = make_mosaic(\n struct,\n gap,\n xshift,\n yshift,\n rotation,\n interp_type=interp,\n geotran=geotran,\n fill=fill,\n cleanup=cleanup,\n log=log,\n verbose=verbose)\n\n # update the header information\n # housekeeping keywords\n fname, hist = history(\n level=1, wrap=False, exclude=[\n 'images', 'outimages', 'outpref'])\n saltkey.housekeeping(\n ostruct[0],\n 'SMOSAIC',\n 'Images have been mosaicked ',\n hist)\n\n # write the image out\n saltio.writefits(ostruct, oimg, clobber=clobber)\n\n # close the files\n saltio.closefits(struct)\n saltio.closefits(ostruct)\n\n\ndef make_mosaic(struct, gap, xshift, yshift, rotation, interp_type='linear',\n boundary='constant', constant=0, geotran=True, fill=False,\n cleanup=True, log=None, verbose=False):\n \"\"\"Given a SALT image struct, combine each of the individual amplifiers and\n apply the geometric CCD transformations to the image\n \"\"\"\n\n # get the name of the file\n infile = saltkey.getimagename(struct[0], base=True)\n outpath = './'\n\n # identify instrument\n instrume, keyprep, keygain, keybias, keyxtalk, keyslot = \\\n saltkey.instrumid(struct)\n\n # how many amplifiers?\n nsciext = saltkey.get('NSCIEXT', struct[0])\n nextend = saltkey.get('NEXTEND', struct[0])\n nccds = saltkey.get('NCCDS', struct[0])\n amplifiers = nccds * 2\n\n if nextend > nsciext:\n varframe = True\n else:\n varframe = False\n\n # CCD geometry coefficients\n if (instrume == 'RSS' or instrume == 'PFIS'):\n xsh = [0., xshift[0], 0., xshift[1]]\n 
ysh = [0., yshift[0], 0., yshift[1]]\n rot = [0., rotation[0], 0., rotation[1]]\n elif instrume == 'SALTICAM':\n xsh = [0., xshift[0], 0.]\n ysh = [0., yshift[0], 0.]\n rot = [0., rotation[0], 0]\n\n # how many extensions?\n nextend = saltkey.get('NEXTEND', struct[0])\n\n # CCD on-chip binning\n xbin, ybin = saltkey.ccdbin(struct[0])\n\n # create temporary primary extension\n outstruct = []\n outstruct.append(struct[0])\n # define temporary FITS file store tiled CCDs\n\n tilefile = saltio.tmpfile(outpath)\n tilefile += 'tile.fits'\n if varframe:\n tilehdu = [None] * (3 * int(nsciext / 2) + 1)\n else:\n tilehdu = [None] * int(nsciext / 2 + 1)\n tilehdu[0] = pyfits.PrimaryHDU()\n tilehdu[0].header = struct[0].header\n\n if log:\n log.message('', with_stdout=verbose)\n\n # iterate over amplifiers, stich them to produce file of CCD images\n for i in range(int(nsciext / 2)):\n hdu = i * 2 + 1\n # amplifier = hdu%amplifiers\n # if (amplifier == 0): amplifier = amplifiers\n\n # read DATASEC keywords\n datasec1 = saltkey.get('DATASEC', struct[hdu])\n datasec2 = saltkey.get('DATASEC', struct[hdu + 1])\n xdsec1, ydsec1 = saltstring.secsplit(datasec1)\n xdsec2, ydsec2 = saltstring.secsplit(datasec2)\n\n # read images\n imdata1 = saltio.readimage(struct, hdu)\n imdata2 = saltio.readimage(struct, hdu + 1)\n\n # tile 2n amplifiers to yield n CCD images\n outdata = numpy.zeros((ydsec1[1] +\n abs(ysh[i +\n 1] /\n ybin), xdsec1[1] +\n xdsec2[1] +\n abs(xsh[i +\n 1] /\n xbin)), numpy.float32)\n\n # set up the variance frame\n if varframe:\n vardata = outdata.copy()\n vdata1 = saltio.readimage(struct, struct[hdu].header['VAREXT'])\n vdata2 = saltio.readimage(struct, struct[hdu + 1].header['VAREXT'])\n\n bpmdata = outdata.copy()\n bdata1 = saltio.readimage(struct, struct[hdu].header['BPMEXT'])\n bdata2 = saltio.readimage(struct, struct[hdu + 1].header['BPMEXT'])\n\n x1 = xdsec1[0] - 1\n if x1 != 0:\n msg = 'The data in %s have not been trimmed prior to mosaicking.' 
\\\n % infile\n log.error(msg)\n if xsh[i + 1] < 0:\n x1 += abs(xsh[i + 1] / xbin)\n x2 = x1 + xdsec1[1]\n y1 = ydsec1[0] - 1\n if ysh[i + 1] < 0:\n y1 += abs(ysh[i + 1] / ybin)\n y2 = y1 + ydsec1[1]\n outdata[y1:y2, x1:x2] =\\\n imdata1[ydsec1[0] - 1:ydsec1[1], xdsec1[0] - 1:xdsec1[1]]\n\n if varframe:\n vardata[y1:y2, x1:x2] =\\\n vdata1[ydsec1[0] - 1:ydsec1[1], xdsec1[0] - 1:xdsec1[1]]\n bpmdata[y1:y2, x1:x2] =\\\n bdata1[ydsec1[0] - 1:ydsec1[1], xdsec1[0] - 1:xdsec1[1]]\n\n x1 = x2\n x2 = x1 + xdsec2[1]\n y1 = ydsec2[0] - 1\n if ysh[i + 1] < 0:\n y1 += abs(ysh[i + 1] / ybin)\n y2 = y1 + ydsec2[1]\n outdata[y1:y2, x1:x2] =\\\n imdata2[ydsec1[0] - 1:ydsec1[1], xdsec1[0] - 1:xdsec1[1]]\n\n if varframe:\n vardata[y1:y2, x1:x2] =\\\n vdata2[ydsec1[0] - 1:ydsec1[1], xdsec1[0] - 1:xdsec1[1]]\n bpmdata[y1:y2, x1:x2] =\\\n bdata2[ydsec1[0] - 1:ydsec1[1], xdsec1[0] - 1:xdsec1[1]]\n\n # size of new image\n naxis1 = str(xdsec1[1] + xdsec2[1])\n naxis2 = str(ydsec1[1])\n\n # add image and keywords to HDU list\n tilehdu[i + 1] = pyfits.ImageHDU(outdata)\n tilehdu[i + 1].header = struct[hdu].header\n tilehdu[\n i + 1].header['DATASEC'] = '[1:' + naxis1 + ',1:' + naxis2 + ']'\n\n if varframe:\n vext = i + 1 + int(nsciext / 2.)\n tilehdu[vext] = pyfits.ImageHDU(vardata)\n tilehdu[vext].header = struct[struct[hdu].header['VAREXT']].header\n tilehdu[vext].header[\n 'DATASEC'] = '[1:' + naxis1 + ',1:' + naxis2 + ']'\n\n bext = i + 1 + 2 * int(nsciext / 2.)\n tilehdu[bext] = pyfits.ImageHDU(bpmdata)\n tilehdu[bext].header = struct[struct[hdu].header['BPMEXT']].header\n tilehdu[bext].header[\n 'DATASEC'] = '[1:' + naxis1 + ',1:' + naxis2 + ']'\n\n # image tile log message #1\n if log:\n message = os.path.basename(infile) + '[' + str(hdu) + ']['\n message += str(xdsec1[0]) + ':' + str(xdsec1[1]) + ','\n message += str(ydsec1[0]) + ':' + str(ydsec1[1]) + '] --> '\n message += os.path.basename(tilefile) + '[' + str(i + 1) + ']['\n message += str(xdsec1[0]) + ':' + str(xdsec1[1]) + ','\n message += str(ydsec1[0]) + ':' + str(ydsec1[1]) + ']'\n log.message(message, with_stdout=verbose, with_header=False)\n message = os.path.basename(infile) + '[' + str(hdu + 1) + ']['\n message += str(xdsec1[0]) + ':' + str(xdsec1[1]) + ','\n message += str(ydsec1[0]) + ':' + str(ydsec1[1]) + '] --> '\n message += os.path.basename(tilefile) + '[' + str(i + 1) + ']['\n message += str(xdsec1[1] + 1) + ':' + \\\n str(xdsec1[1] + xdsec2[1]) + ','\n message += str(ydsec2[0]) + ':' + str(ydsec2[1]) + ']'\n log.message(message, with_stdout=verbose, with_header=False)\n\n # write temporary file of tiled CCDs\n hdulist = pyfits.HDUList(tilehdu)\n hdulist.writeto(tilefile)\n\n # iterate over CCDs, transform and rotate images\n yrot = [None] * 4\n xrot = [None] * 4\n\n tranfile = [' ']\n tranhdu = [0]\n if varframe:\n tranfile = [''] * (3 * int(nsciext / 2) + 1)\n tranhdu = [0] * (3 * int(nsciext / 2) + 1)\n else:\n tranfile = [''] * int(nsciext / 2 + 1)\n tranhdu = [0] * int(nsciext / 2 + 1)\n\n # this is hardwired for SALT where the second CCD is considered the\n # fiducial\n for hdu in range(1, int(nsciext / 2 + 1)):\n tranfile[hdu] = saltio.tmpfile(outpath)\n tranfile[hdu] += 'tran.fits'\n if varframe:\n tranfile[hdu + nccds] = saltio.tmpfile(outpath) + 'tran.fits'\n tranfile[hdu + 2 * nccds] = saltio.tmpfile(outpath) + 'tran.fits'\n\n ccd = hdu % nccds\n if (ccd == 0):\n ccd = nccds\n\n # correct rotation for CCD binning\n yrot[ccd] = rot[ccd] * ybin / xbin\n xrot[ccd] = rot[ccd] * xbin / ybin\n dxshift = xbin * int(float(int(gap) / 
xbin) + 0.5) - gap\n\n # transformation using geotran IRAF task\n # if (ccd == 1):\n if (ccd != 2):\n\n if geotran:\n message = '\\nSALTMOSAIC -- geotran ' + tilefile + \\\n '[' + str(ccd) + '] ' + tranfile[hdu]\n message += ' \\\"\\\" \\\"\\\" xshift=' + \\\n str((xsh[ccd] + (2 - ccd) * dxshift) / xbin) + ' '\n message += 'yshift=' + \\\n str(ysh[ccd] / ybin) + ' xrotation=' + str(xrot[ccd]) + ' '\n message += 'yrotation=' + \\\n str(yrot[ccd]) + ' xmag=1 ymag=1 xmin=\\'INDEF\\''\n message += 'xmax=\\'INDEF\\' ymin=\\'INDEF\\' ymax=\\'INDEF\\' '\n message += 'ncols=\\'INDEF\\' '\n message += 'nlines=\\'INDEF\\' verbose=\\'no\\' '\n message += 'fluxconserve=\\'yes\\' nxblock=2048 '\n message += 'nyblock=2048 interpolant=\\'' + \\\n interp_type + '\\' boundary=\\'constant\\' constant=0'\n log.message(message, with_stdout=verbose)\n\n yd, xd = tilehdu[ccd].data.shape\n ncols = 'INDEF' # ncols=xd+abs(xsh[ccd]/xbin)\n nlines = 'INDEF' # nlines=yd+abs(ysh[ccd]/ybin)\n geo_xshift = xsh[ccd] + (2 - ccd) * dxshift / xbin\n geo_yshift = ysh[ccd] / ybin\n iraf.images.immatch.geotran(tilefile + \"[\" + str(ccd) + \"]\",\n tranfile[hdu],\n \"\",\n \"\",\n xshift=geo_xshift,\n yshift=geo_yshift,\n xrotation=xrot[ccd],\n yrotation=yrot[ccd],\n xmag=1, ymag=1, xmin='INDEF',\n xmax='INDEF', ymin='INDEF',\n ymax='INDEF', ncols=ncols,\n nlines=nlines, verbose='no',\n fluxconserve='yes', nxblock=2048,\n nyblock=2048, interpolant=\"linear\",\n boundary=\"constant\", constant=0)\n if varframe:\n var_infile = tilefile + \"[\" + str(ccd + nccds) + \"]\"\n iraf.images.immatch.geotran(var_infile,\n tranfile[hdu + nccds],\n \"\",\n \"\",\n xshift=geo_xshift,\n yshift=geo_yshift,\n xrotation=xrot[ccd],\n yrotation=yrot[ccd],\n xmag=1, ymag=1, xmin='INDEF',\n xmax='INDEF', ymin='INDEF',\n ymax='INDEF', ncols=ncols,\n nlines=nlines, verbose='no',\n fluxconserve='yes',\n nxblock=2048, nyblock=2048,\n interpolant=\"linear\",\n boundary=\"constant\",\n constant=0)\n var2_infile = tilefile + \"[\" + str(ccd + 2 * nccds) + \"]\"\n iraf.images.immatch.geotran(var2_infile,\n tranfile[hdu + 2 * nccds],\n \"\",\n \"\",\n xshift=geo_xshift,\n yshift=geo_yshift,\n xrotation=xrot[ccd],\n yrotation=yrot[ccd],\n xmag=1, ymag=1, xmin='INDEF',\n xmax='INDEF', ymin='INDEF',\n ymax='INDEF', ncols=ncols,\n nlines=nlines, verbose='no',\n fluxconserve='yes',\n nxblock=2048, nyblock=2048,\n interpolant=\"linear\",\n boundary=\"constant\",\n constant=0)\n\n # open the file and copy the data to tranhdu\n tstruct = pyfits.open(tranfile[hdu])\n tranhdu[hdu] = tstruct[0].data\n tstruct.close()\n if varframe:\n tranhdu[\n hdu +\n nccds] = pyfits.open(\n tranfile[\n hdu +\n nccds])[0].data\n tranhdu[\n hdu +\n 2 *\n nccds] = pyfits.open(\n tranfile[\n hdu +\n 2 *\n nccds])[0].data\n\n else:\n log.message(\n \"Transform CCD #%i using dx=%s, dy=%s, rot=%s\" %\n (ccd,\n xsh[ccd] /\n 2.0,\n ysh[ccd] /\n 2.0,\n xrot[ccd]),\n with_stdout=verbose,\n with_header=False)\n tranhdu[hdu] = geometric_transform(\n tilehdu[ccd].data,\n tran_func,\n prefilter=False,\n order=1,\n extra_arguments=(\n xsh[ccd] / 2,\n ysh[ccd] / 2,\n 1,\n 1,\n xrot[ccd],\n yrot[ccd]))\n tstruct = pyfits.PrimaryHDU(tranhdu[hdu])\n tstruct.writeto(tranfile[hdu])\n if varframe:\n tranhdu[hdu + nccds] = geometric_transform(\n tilehdu[hdu + 3].data,\n tran_func,\n prefilter=False,\n order=1,\n extra_arguments=(\n xsh[ccd] / 2, ysh[ccd] / 2,\n 1, 1,\n xrot[ccd], yrot[ccd]))\n tranhdu[hdu + 2 * nccds] = geometric_transform(\n tilehdu[hdu + 6].data,\n tran_func,\n prefilter=False,\n 
order=1,\n extra_arguments=(\n xsh[ccd] / 2, ysh[ccd] / 2,\n 1, 1,\n xrot[ccd], yrot[ccd]))\n\n else:\n log.message(\n \"Transform CCD #%i using dx=%s, dy=%s, rot=%s\" %\n (ccd, 0, 0, 0), with_stdout=verbose, with_header=False)\n tranhdu[hdu] = tilehdu[ccd].data\n if varframe:\n tranhdu[hdu + nccds] = tilehdu[ccd + nccds].data\n tranhdu[hdu + 2 * nccds] = tilehdu[ccd + 2 * nccds].data\n\n # open outfile\n if varframe:\n outlist = 4 * [None]\n else:\n outlist = 2 * [None]\n\n # outstruct[0] = pyfits.PrimaryHDU()\n outlist[0] = struct[0].copy()\n naxis1 = int(gap / xbin * (nccds - 1))\n naxis2 = 0\n for i in range(1, nccds + 1):\n yw, xw = tranhdu[i].shape\n naxis1 += xw + int(abs(xsh[ccd] / xbin)) + 1\n naxis2 = max(naxis2, yw)\n outdata = numpy.zeros((naxis2, naxis1), numpy.float32)\n outdata.shape = naxis2, naxis1\n if varframe:\n vardata = outdata * 0\n bpmdata = outdata * 0 + 1\n\n # iterate over CCDs, stich them to produce a full image\n hdu = 0\n totxshift = 0\n for hdu in range(1, nccds + 1):\n\n # read DATASEC keywords\n ydsec, xdsec = tranhdu[hdu].shape\n\n # define size and shape of final image\n # tile CCDs to yield mosaiced image\n x1 = int((hdu - 1) * (xdsec + gap / xbin)) + int(totxshift)\n x2 = xdsec + x1\n y1 = int(0)\n y2 = int(ydsec)\n outdata[y1:y2, x1:x2] = tranhdu[hdu]\n totxshift += int(abs(xsh[hdu] / xbin)) + 1\n if varframe:\n vardata[y1:y2, x1:x2] = tranhdu[hdu + nccds]\n bpmdata[y1:y2, x1:x2] = tranhdu[hdu + 2 * nccds]\n\n # make sure to cover up all the gaps include bad areas\n if varframe:\n baddata = (outdata == 0)\n baddata = nd.maximum_filter(baddata, size=3)\n bpmdata[baddata] = 1\n \n\n # fill in the gaps if requested\n if fill:\n if varframe:\n outdata = fill_gaps(outdata, bpmdata)\n else:\n outdata = fill_gaps(outdata, 0)\n\n # add to the file\n outlist[1] = pyfits.ImageHDU(outdata)\n if varframe:\n outlist[2] = pyfits.ImageHDU(vardata)\n outlist[3] = pyfits.ImageHDU(bpmdata)\n\n # create the image structure\n outstruct = pyfits.HDUList(outlist)\n\n # update the head informaation\n # housekeeping keywords\n saltkey.put('NEXTEND', 2, outstruct[0])\n saltkey.new('EXTNAME', 'SCI', 'Extension name', outstruct[1])\n saltkey.new('EXTVER', 1, 'Extension number', outstruct[1])\n if varframe:\n saltkey.new('VAREXT', 2, 'Variance frame extension', outstruct[1])\n saltkey.new('BPMEXT', 3, 'BPM Extension', outstruct[1])\n\n try:\n saltkey.copy(struct[1], outstruct[1], 'CCDSUM')\n except:\n pass\n\n # Add keywords associated with geometry\n gstr = '%i %f %f %f %f %f %f' % (gap,\n xshift[0],\n yshift[0],\n rotation[0],\n xshift[1],\n yshift[1],\n rotation[1])\n saltkey.new('SALTGEOM', gstr, 'SALT geometry coefficients', outstruct[0])\n\n # WCS keywords\n saltkey.new('CRPIX1', 0, 'WCS: X reference pixel', outstruct[1])\n saltkey.new('CRPIX2', 0, 'WCS: Y reference pixel', outstruct[1])\n saltkey.new(\n 'CRVAL1',\n float(xbin),\n 'WCS: X reference coordinate value',\n outstruct[1])\n saltkey.new(\n 'CRVAL2',\n float(ybin),\n 'WCS: Y reference coordinate value',\n outstruct[1])\n saltkey.new('CDELT1', float(xbin), 'WCS: X pixel size', outstruct[1])\n saltkey.new('CDELT2', float(ybin), 'WCS: Y pixel size', outstruct[1])\n saltkey.new('CTYPE1', 'pixel', 'X type', outstruct[1])\n saltkey.new('CTYPE2', 'pixel', 'Y type', outstruct[1])\n\n # cleanup temporary files\n if cleanup:\n for tfile in tranfile:\n if os.path.isfile(tfile):\n saltio.delete(tfile)\n if os.path.isfile(tilefile):\n status = saltio.delete(tilefile)\n\n # return the file\n return outstruct\n\n\ndef 
fill_gaps(data, mask):\n \"\"\"Interpolate in the gaps in the data\n\n Parameters\n ----------\n data: np.ndarray\n data to have values filled in for\n\n mask: float or nd.ndarray\n If an nd.ndarray, it will be assumed to be a mask\n with values equal to 1 where they should be interpolated\n over. If a float, pixels with that value will be replaced\n\n \"\"\"\n ys, xs = data.shape\n if isinstance(mask, numpy.ndarray):\n mask = (mask == 0)\n for i in range(ys):\n x = numpy.arange(xs)\n rdata = data[i, :]\n rmask = mask[i, :]\n rmask = nd.minimum_filter(rmask, size=3)\n rdata = numpy.interp(x, x[rmask], rdata[rmask])\n data[i, rmask == 0] = rdata[rmask == 0]\n else:\n mask = (data != mask)\n for i in range(ys):\n x = numpy.arange(xs)\n rdata = data[i, :]\n rmask = mask[i, :]\n rmask = nd.minimum_filter(rmask, size=3)\n rdata = numpy.interp(x, x[rmask], rdata[rmask])\n data[i, rmask == 0] = rdata[rmask == 0]\n\n return data\n\n\ndef tran_func(a, xshift, yshift, xmag, ymag, xrot, yrot):\n xtran = ymag * a[0] * cos(yrot * pi / 180.0) \\\n - xmag * a[1] * sin(xrot * pi / 180) \\\n - yshift\n ytran = ymag * a[0] * sin(yrot * pi / 180.0) \\\n + xmag * a[1] * cos(xrot * pi / 180) \\\n - xshift\n return xtran, ytran\n\n\n# -----------------------------------------------------------\n# main code\nif not iraf.deftask('saltmosaic'):\n parfile = iraf.osfn(\"saltred$saltmosaic.par\")\n t = iraf.IrafTaskFactory(\n taskname=\"saltmosaic\",\n value=parfile,\n function=saltmosaic,\n pkgname='saltred')\n" ]
[ [ "numpy.interp", "numpy.zeros", "scipy.ndimage.maximum_filter", "scipy.ndimage.geometric_transform", "numpy.arange", "scipy.ndimage.minimum_filter" ] ]
tmpaul06/dgl
[ "8f458464b0e14c78978db4b91590e8ca718c5ec6" ]
[ "transformer/dataset/utils.py" ]
[ "import numpy as np\nimport torch as th\nimport os\nfrom dgl.data.utils import *\nimport spacy\nfrom tqdm import tqdm\n\nnlp = spacy.load('en')\n\n_urls = {\n 'wmt': 'https://s3.us-east-2.amazonaws.com/dgl.ai/dataset/wmt14bpe_de_en.zip',\n 'scripts': 'https://s3.us-east-2.amazonaws.com/dgl.ai/dataset/transformer_scripts.zip',\n}\n\n\ndef store_dependency_parses(in_filename, out_filename):\n \"\"\"Create dependency parses in advance so that training is fast\"\"\"\n with open(in_filename, 'r') as f:\n input_lines = f.readlines()\n\n print('Preparing dependency tokens for {} sentences using {}'.format(len(input_lines), in_filename))\n # Batch write\n batch_size = min(max(len(input_lines) // 100, 100), 500)\n with open(out_filename, 'w') as out_f:\n for i in tqdm(range(0, len(input_lines), batch_size)):\n lines = input_lines[i:(i + batch_size + 1)]\n out_lines = list()\n for line in lines:\n # Replace @ with ''. This is a cheap hack\n line = line.replace('@', '').strip()\n if not line:\n continue\n tokens = nlp(line)\n\n line_deps = list()\n for tok in tokens:\n line_deps.append(str((tok.i, tok.head.i)).replace(' ', ''))\n out_lines.append(' '.join(line_deps))\n out_f.write('\\n'.join(out_lines))\n\n\ndef prepare_dataset(dataset_name):\n \"download and generate datasets\"\n script_dir = os.path.join('scripts')\n if not os.path.exists(script_dir):\n download(_urls['scripts'], path='scripts.zip')\n extract_archive('scripts.zip', 'scripts')\n\n directory = os.path.join('data', dataset_name)\n if not os.path.exists(directory):\n os.makedirs(directory)\n else:\n return\n if dataset_name == 'multi30k':\n os.system('bash scripts/prepare-multi30k.sh')\n # Pre-create dependency parses for train, valid and test\n for fi in ['train', 'val', 'test2016']:\n store_dependency_parses('data/multi30k/{}.en.atok'.format(fi), 'data/multi30k/{}_deps.en.atok'.format(fi))\n elif dataset_name == 'wmt14':\n download(_urls['wmt'], path='wmt14.zip')\n os.system('bash scripts/prepare-wmt14.sh')\n elif dataset_name == 'copy' or dataset_name == 'tiny_copy':\n train_size = 9000\n valid_size = 1000\n test_size = 1000\n char_list = [chr(i) for i in range(ord('a'), ord('z') + 1)]\n with open(os.path.join(directory, 'train.in'), 'w') as f_in,\\\n open(os.path.join(directory, 'train.out'), 'w') as f_out:\n for i, l in zip(range(train_size), np.random.normal(15, 3, train_size).astype(int)):\n l = max(l, 1)\n line = ' '.join(np.random.choice(char_list, l)) + '\\n'\n f_in.write(line)\n f_out.write(line)\n\n with open(os.path.join(directory, 'valid.in'), 'w') as f_in,\\\n open(os.path.join(directory, 'valid.out'), 'w') as f_out:\n for i, l in zip(range(valid_size), np.random.normal(15, 3, valid_size).astype(int)):\n l = max(l, 1)\n line = ' '.join(np.random.choice(char_list, l)) + '\\n'\n f_in.write(line)\n f_out.write(line)\n\n with open(os.path.join(directory, 'test.in'), 'w') as f_in,\\\n open(os.path.join(directory, 'test.out'), 'w') as f_out:\n for i, l in zip(range(test_size), np.random.normal(15, 3, test_size).astype(int)):\n l = max(l, 1)\n line = ' '.join(np.random.choice(char_list, l)) + '\\n'\n f_in.write(line)\n f_out.write(line)\n\n with open(os.path.join(directory, 'vocab.txt'), 'w') as f:\n for c in char_list:\n f.write(c + '\\n')\n\n elif dataset_name == 'sort' or dataset_name == 'tiny_sort':\n train_size = 9000\n valid_size = 1000\n test_size = 1000\n char_list = [chr(i) for i in range(ord('a'), ord('z') + 1)]\n with open(os.path.join(directory, 'train.in'), 'w') as f_in,\\\n open(os.path.join(directory, 
'train.out'), 'w') as f_out:\n for i, l in zip(range(train_size), np.random.normal(15, 3, train_size).astype(int)):\n l = max(l, 1)\n seq = np.random.choice(char_list, l)\n f_in.write(' '.join(seq) + '\\n')\n f_out.write(' '.join(np.sort(seq)) + '\\n')\n\n with open(os.path.join(directory, 'valid.in'), 'w') as f_in,\\\n open(os.path.join(directory, 'valid.out'), 'w') as f_out:\n for i, l in zip(range(valid_size), np.random.normal(15, 3, valid_size).astype(int)):\n l = max(l, 1)\n seq = np.random.choice(char_list, l)\n f_in.write(' '.join(seq) + '\\n')\n f_out.write(' '.join(np.sort(seq)) + '\\n')\n\n with open(os.path.join(directory, 'test.in'), 'w') as f_in,\\\n open(os.path.join(directory, 'test.out'), 'w') as f_out:\n for i, l in zip(range(test_size), np.random.normal(15, 3, test_size).astype(int)):\n l = max(l, 1)\n seq = np.random.choice(char_list, l)\n f_in.write(' '.join(seq) + '\\n')\n f_out.write(' '.join(np.sort(seq)) + '\\n')\n\n with open(os.path.join(directory, 'vocab.txt'), 'w') as f:\n for c in char_list:\n f.write(c + '\\n')\n" ]
[ [ "numpy.random.normal", "numpy.sort", "numpy.random.choice" ] ]
lhoestq/DeDLOC
[ "36f5a6d043c3d727f9d098a35fba94aa351a5cd4" ]
[ "swav/vissl/extra_scripts/create_ucf101_data_files.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport argparse\nimport os\nimport ssl\nfrom contextlib import contextmanager\nfrom typing import List, Optional, Tuple\n\nfrom PIL import Image\nfrom torch.utils.data import DataLoader\nfrom torchvision.datasets.utils import download_url, extract_archive\nfrom tqdm import tqdm\n\ntry:\n from pyunpack import Archive\nexcept ImportError:\n raise ValueError(\n \"You must have pyunpack and patool installed to run this script: pip install pyunpack patool.\"\n )\n\ntry:\n import av\nexcept ImportError:\n raise ValueError(\"You must have pyav installed to run this script: pip install av.\")\n\n\ndef get_argument_parser():\n \"\"\"\n List of arguments supported by the script\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-i\",\n \"--input\",\n type=str,\n help=\"The input folder contains the expanded UCF-101 archive files\",\n )\n parser.add_argument(\n \"-o\",\n \"--output\",\n type=str,\n help=\"The output folder containing the disk_folder output\",\n )\n parser.add_argument(\n \"-d\",\n \"--download\",\n action=\"store_const\",\n const=True,\n default=False,\n help=\"To download the original dataset and decompress it in the input folder\",\n )\n return parser\n\n\n@contextmanager\ndef without_ssl_certificate_check():\n default_context = ssl._create_default_https_context\n ssl._create_default_https_context = ssl._create_unverified_context\n yield\n ssl._create_default_https_context = default_context\n\n\ndef download_dataset(root: str):\n \"\"\"\n Download the UCF101 dataset archive and expand it in the folder provided as parameter\n \"\"\"\n IMAGE_URL = \"https://www.crcv.ucf.edu/data/UCF101/UCF101.rar\"\n SPLIT_URL = (\n \"https://www.crcv.ucf.edu/data/UCF101/UCF101TrainTestSplits-RecognitionTask.zip\"\n )\n\n # Download the raw inputs of UCF101, circumventing the SSL certificate issues\n with without_ssl_certificate_check():\n download_url(url=IMAGE_URL, root=root)\n download_url(url=SPLIT_URL, root=root)\n\n # Extract the archives\n print(\"Extracting archives...\")\n Archive(os.path.join(root, \"UCF101.rar\")).extractall(root)\n extract_archive(os.path.join(root, \"UCF101TrainTestSplits-RecognitionTask.zip\"))\n\n\nclass _ExtractMiddleFrameDataset:\n \"\"\"\n Dataset used to parallelize the transformation of the dataset via a DataLoader\n \"\"\"\n\n def __init__(self, data_path: str, annotation_path: str):\n self.data_path = data_path\n self.split_info = self._read_split_info(annotation_path)\n\n @staticmethod\n def _read_split_info(file_path: str) -> List[Tuple[str, str]]:\n samples = []\n with open(file_path) as f:\n for line in f:\n category, file_name = line.strip().split(\"/\")\n file_name = file_name.split(\" \")[0]\n samples.append((category, file_name))\n return samples\n\n @staticmethod\n def _extract_middle_frame(file_path: str) -> Optional[Image.Image]:\n \"\"\"\n Extract the middle frame out of a video clip.\n \"\"\"\n with av.open(file_path) as container:\n nb_frames = container.streams.video[0].frames\n vid_stream = container.streams.video[0]\n for i, frame in enumerate(container.decode(vid_stream)):\n if i - 1 == nb_frames // 2:\n return frame.to_image()\n return None\n\n def __len__(self):\n return len(self.split_info)\n\n def __getitem__(self, idx: int) -> Tuple[Image.Image, str, str]:\n category, video_name = self.split_info[idx]\n video_path = os.path.join(self.data_path, category, video_name)\n mid_frame = self._extract_middle_frame(video_path)\n image_name = 
os.path.splitext(video_name)[0] + \".jpg\"\n return mid_frame, image_name, category\n\n\ndef create_disk_folder_split(annotation_path: str, data_path: str, output_path: str):\n \"\"\"\n Create one split of the disk_folder format from the file at 'annotation_path' and the data stored\n in the folder 'data_path'.\n \"\"\"\n assert os.path.exists(\n annotation_path\n ), f\"Could not find annotation path {annotation_path}\"\n assert os.path.exists(data_path), f\"Could not find data folder {data_path}\"\n\n dataset = _ExtractMiddleFrameDataset(\n data_path=data_path, annotation_path=annotation_path\n )\n loader = DataLoader(dataset, num_workers=8, batch_size=1, collate_fn=lambda x: x[0])\n for batch in tqdm(loader):\n mid_frame, image_name, category = batch\n category_folder = os.path.join(output_path, category)\n os.makedirs(category_folder, exist_ok=True)\n image_path = os.path.join(category_folder, image_name)\n # Binary mode: PIL writes raw JPEG bytes, so a text-mode handle would raise a TypeError\n with open(image_path, \"wb\") as image_file:\n mid_frame.save(image_file)\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Example usage:\n\n ```\n python extra_scripts/create_ucf101_data_files.py -i /path/to/ucf101 -o /output_path/ucf101 -d\n ```\n \"\"\"\n args = get_argument_parser().parse_args()\n if args.download:\n download_dataset(args.input)\n\n data_path = os.path.join(args.input, \"UCF-101\")\n annotation_path = os.path.join(args.input, \"ucfTrainTestlist\")\n create_disk_folder_split(\n annotation_path=os.path.join(annotation_path, \"trainlist01.txt\"),\n data_path=data_path,\n output_path=os.path.join(args.output, \"train\"),\n )\n create_disk_folder_split(\n annotation_path=os.path.join(annotation_path, \"testlist01.txt\"),\n data_path=data_path,\n output_path=os.path.join(args.output, \"val\"),\n )\n" ]
[ [ "torch.utils.data.DataLoader" ] ]
tatatodd/tensorflow
[ "8ae7343f3d24569b4bb142ddc7b58037267a2d3c" ]
[ "tensorflow/contrib/optimizer_v2/optimizer_v2.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Version 2 of class Optimizer.\"\"\"\n# pylint: disable=g-bad-name\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\n\nimport six\n\nfrom tensorflow.python.eager import backprop\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gradients\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.training import distribute as distribute_lib\nfrom tensorflow.python.training import distribution_strategy_context as distribute_ctx\nfrom tensorflow.python.training import optimizer as optimizer_v1\nfrom tensorflow.python.training import slot_creator\nfrom tensorflow.python.training.checkpointable import base as checkpointable\nfrom tensorflow.python.util import nest\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass _OptimizableVariable(object):\n \"\"\"Interface for abstracting over variables in the optimizers.\"\"\"\n\n @abc.abstractmethod\n def target(self):\n \"\"\"Returns the optimization target for this variable.\"\"\"\n raise NotImplementedError(\"Calling an abstract method.\")\n\n @abc.abstractmethod\n def update_op(self, optimizer, g, *args):\n \"\"\"Returns the update ops for updating the variable.\"\"\"\n raise NotImplementedError(\"Calling an abstract method.\")\n\n\nclass _RefVariableProcessor(_OptimizableVariable):\n \"\"\"Processor for Variable.\"\"\"\n\n def __init__(self, v):\n self._v = v\n\n def target(self):\n return self._v._ref() # pylint: disable=protected-access\n\n def update_op(self, optimizer, g, *args):\n if isinstance(g, ops.Tensor):\n update_op = optimizer._apply_dense(g, self._v, *args) # pylint: disable=protected-access\n if self._v.constraint is not None:\n with ops.control_dependencies([update_op]):\n return self._v.assign(self._v.constraint(self._v))\n else:\n return update_op\n else:\n assert isinstance(g, ops.IndexedSlices), (\"Gradient \", g, \" is neither a \"\n \"tensor nor IndexedSlices.\")\n if self._v.constraint is not None:\n raise RuntimeError(\n \"Cannot use a constraint function on a sparse variable.\")\n # pylint: disable=protected-access\n return optimizer._apply_sparse_duplicate_indices(g, self._v, *args)\n\n\nclass _DenseReadResourceVariableProcessor(_OptimizableVariable):\n \"\"\"Processor for dense ResourceVariables.\"\"\"\n\n def __init__(self, v):\n self._v = v\n\n def target(self):\n return self._v\n\n def update_op(self, optimizer, g, *args):\n # pylint: disable=protected-access\n update_op = optimizer._resource_apply_dense(g, 
self._v.op.inputs[0], *args)\n if self._v.constraint is not None:\n with ops.control_dependencies([update_op]):\n return self._v.assign(self._v.constraint(self._v))\n else:\n return update_op\n\n\nclass _DenseResourceVariableProcessor(_OptimizableVariable):\n \"\"\"Processor for dense ResourceVariables.\"\"\"\n\n def __init__(self, v):\n self._v = v\n\n def target(self):\n return self._v\n\n def update_op(self, optimizer, g, *args):\n # pylint: disable=protected-access\n if isinstance(g, ops.IndexedSlices):\n if self._v.constraint is not None:\n raise RuntimeError(\n \"Cannot use a constraint function on a sparse variable.\")\n return optimizer._resource_apply_sparse_duplicate_indices(\n g.values, self._v, g.indices, *args)\n update_op = optimizer._resource_apply_dense(g, self._v, *args)\n if self._v.constraint is not None:\n with ops.control_dependencies([update_op]):\n return self._v.assign(self._v.constraint(self._v))\n else:\n return update_op\n\n\nclass _TensorProcessor(_OptimizableVariable):\n \"\"\"Processor for ordinary Tensors.\n\n Even though a Tensor can't really be updated, sometimes it is useful to\n compute the gradients with respect to a Tensor using the optimizer. Updating\n the Tensor is, of course, unsupported.\n \"\"\"\n\n def __init__(self, v):\n self._v = v\n\n def target(self):\n return self._v\n\n def update_op(self, optimizer, g, *args):\n raise NotImplementedError(\"Trying to update a Tensor \", self._v)\n\n\ndef _get_processor(v):\n \"\"\"The processor of v.\"\"\"\n if context.executing_eagerly():\n if isinstance(v, ops.Tensor):\n return _TensorProcessor(v)\n else:\n return _DenseResourceVariableProcessor(v)\n if v.op.type == \"VarHandleOp\":\n return _DenseResourceVariableProcessor(v)\n if isinstance(v, variables.Variable):\n return _RefVariableProcessor(v)\n if isinstance(v, ops.Tensor):\n return _TensorProcessor(v)\n raise NotImplementedError(\"Trying to optimize unsupported type \", v)\n\n\ndef _var_key_v2(var):\n \"\"\"Key for representing a primary variable, for looking up slots.\"\"\"\n # pylint: disable=protected-access\n if hasattr(var, \"_distributed_container\"):\n distributed_container = var._distributed_container()\n assert distributed_container is not None\n if context.executing_eagerly():\n return distributed_container._unique_id\n return distributed_container._shared_name\n if context.executing_eagerly():\n return var._unique_id\n return var.op.name\n\n\ndef _resolve(value, name):\n if callable(value):\n value = value()\n return ops.convert_to_tensor(value, name=name)\n\n\ndef _is_dynamic(value):\n \"\"\"Returns true if __init__ arg `value` should be re-evaluated each step.\"\"\"\n if callable(value):\n return True\n # Don't need to do anything special in graph mode, since dynamic values\n # will propagate correctly automatically.\n # TODO(josh11b): Add per-device caching across steps using variables for\n # truly static values once we add distributed support.\n if context.executing_eagerly() and isinstance(\n value, resource_variable_ops.ResourceVariable):\n return True\n return False\n\n\nclass _OptimizerV2State(object):\n \"\"\"Holds per-graph and per-step optimizer state.\n\n Use _init_with_static_hyper() to create the state for a graph, and then\n _copy_with_dynamic_hyper() to convert that to state for a particular step.\n The difference between the two is that the former only has hyper\n parameter values that are static and the latter also has values that\n can change every step (according to _is_dynamic()).\n \"\"\"\n\n def __init__(self, 
op_name):\n self._op_name = op_name\n\n def _init_with_static_hyper(self, hyper):\n \"\"\"Initialize a fresh state object from hyper dict.\"\"\"\n # self._hyper contains a dict from name to a dict with the Tensor values.\n # This dict starts with a single item with key \"None\" with the hyper\n # parameter value converted to a Tensor. Other items have dtype keys\n # with that Tensor cast to that dtype.\n with ops.init_scope():\n self._hyper = {\n name: {\n None: ops.convert_to_tensor(value, name=name)\n } for name, (dynamic, value) in sorted(hyper.items()) if not dynamic\n }\n self._slots = {}\n self._non_slot_dict = {}\n # Extra state to help Optimizers implement Checkpointable. Holds information\n # about variables which will be restored as soon as they're created.\n self._deferred_dependencies = {} # Non-slot variables\n self._deferred_slot_restorations = {} # Slot variables\n\n def _copy_with_dynamic_hyper(self, hyper, distribution, non_slot_devices):\n \"\"\"Create a new state object for a particular step.\"\"\"\n ret = _OptimizerV2State(self._op_name)\n # pylint: disable=protected-access\n ret._slots = self._slots\n ret._non_slot_dict = self._non_slot_dict\n ret._deferred_dependencies = self._deferred_dependencies\n ret._deferred_slot_restorations = self._deferred_slot_restorations\n ret._hyper = {\n name: {\n None: _resolve(value, name)\n } for name, (dynamic, value) in sorted(hyper.items()) if dynamic\n }\n ret._hyper.update(self._hyper)\n ret._non_slot_devices = non_slot_devices\n ret._distribution = distribution\n return ret\n\n def _variables(self):\n \"\"\"Returns a list of all variables held by self.\"\"\"\n optimizer_variables = list(self._non_slot_dict.values())\n for variable_dict in self._slots.values():\n for slot_for_variable in variable_dict.values():\n optimizer_variables.append(slot_for_variable)\n # Sort variables by name so that the return is deterministic.\n return sorted(optimizer_variables, key=lambda v: v.name)\n\n def _slot_dict(self, slot_name):\n \"\"\"Returns a dict for caching slots created under the given name.\n\n Args:\n slot_name: Name for the slot.\n\n Returns:\n A dict that maps primary `Variable` objects to the slot created\n for that variable, under the given slot name.\n \"\"\"\n named_slots = self._slots.get(slot_name, None)\n if named_slots is None:\n named_slots = {}\n self._slots[slot_name] = named_slots\n return named_slots\n\n def create_slot(self, var, val, slot_name, optional_op_name=None):\n \"\"\"Find or create a slot for a variable.\n\n Args:\n var: A `Variable` object.\n val: A `Tensor`. The initial value of the slot.\n slot_name: Name for the slot.\n optional_op_name: Name to use when scoping the Variable that needs to be\n created for the slot.\n\n Returns:\n A `Variable` object.\n \"\"\"\n named_slots = self._slot_dict(slot_name)\n var_key = _var_key_v2(var)\n if var_key not in named_slots:\n new_slot_variable = slot_creator.create_slot(\n var, val, optional_op_name or self._op_name)\n self._restore_slot_variable(\n slot_name=slot_name, variable=var, slot_variable=new_slot_variable)\n named_slots[var_key] = new_slot_variable\n return named_slots[var_key]\n\n def create_slot_with_initializer(self,\n var,\n initializer,\n shape,\n dtype,\n slot_name,\n optional_op_name=None):\n \"\"\"Find or create a slot for a variable, using an Initializer.\n\n Args:\n var: A `Variable` object.\n initializer: An `Initializer`. 
The initial value of the slot.\n shape: Shape of the initial value of the slot.\n dtype: Type of the value of the slot.\n slot_name: Name for the slot.\n optional_op_name: Name to use when scoping the Variable that needs to be\n created for the slot.\n\n Returns:\n A `Variable` object.\n \"\"\"\n named_slots = self._slot_dict(slot_name)\n var_key = _var_key_v2(var)\n if var_key not in named_slots:\n new_slot_variable = slot_creator.create_slot_with_initializer(\n var, initializer, shape, dtype, optional_op_name or self._op_name)\n self._restore_slot_variable(\n slot_name=slot_name, variable=var, slot_variable=new_slot_variable)\n named_slots[var_key] = new_slot_variable\n return named_slots[var_key]\n\n def zeros_slot(self, var, slot_name, optional_op_name=None):\n \"\"\"Find or create a slot initialized with 0.0.\n\n Args:\n var: A `Variable` object.\n slot_name: Name for the slot.\n optional_op_name: Name to use when scoping the Variable that needs to be\n created for the slot.\n\n Returns:\n A `Variable` object.\n \"\"\"\n named_slots = self._slot_dict(slot_name)\n var_key = _var_key_v2(var)\n if var_key not in named_slots:\n new_slot_variable = slot_creator.create_zeros_slot(\n var, optional_op_name or self._op_name)\n self._restore_slot_variable(\n slot_name=slot_name, variable=var, slot_variable=new_slot_variable)\n named_slots[var_key] = new_slot_variable\n return named_slots[var_key]\n\n def _create_or_restore_slot_variable(self,\n slot_variable_position,\n slot_name,\n variable,\n optional_op_name=None):\n \"\"\"Restore a slot variable's value, possibly creating it.\n\n Called when a variable which has an associated slot variable is created or\n restored. When executing eagerly, we create the slot variable with a\n restoring initializer.\n\n No new variables are created when graph building. Instead,\n _restore_slot_variable catches these after normal creation and adds restore\n ops to the graph. This method is nonetheless important when graph building\n for the case when a slot variable has already been created but `variable`\n has just been added to a dependency graph (causing us to realize that the\n slot variable needs to be restored).\n\n Args:\n slot_variable_position: A `checkpointable._CheckpointPosition` object\n indicating the slot variable `Checkpointable` object to be restored.\n slot_name: The name of this `Optimizer`'s slot to restore into.\n variable: The variable object this slot is being created for.\n optional_op_name: Name to use when scoping the Variable that needs to be\n created for the slot.\n \"\"\"\n slot_variable = self.get_slot(var=variable, name=slot_name)\n if (slot_variable is None and context.executing_eagerly() and\n slot_variable_position.is_simple_variable()\n # Defer slot variable creation if there is an active variable creator\n # scope. Generally we'd like to eagerly create/restore slot variables\n # when possible, but this may mean that scopes intended to catch\n # `variable` also catch its eagerly created slot variable\n # unintentionally (specifically make_template would add a dependency on\n # a slot variable if not for this case). 
Deferring is mostly harmless\n # (aside from double initialization), and makes variable creator scopes\n # behave the same way they do when graph building.\n and not ops.get_default_graph()._variable_creator_stack): # pylint: disable=protected-access\n initializer = checkpointable.CheckpointInitialValue(\n checkpoint_position=slot_variable_position)\n slot_variable = self.create_slot(\n var=variable,\n val=initializer,\n slot_name=slot_name,\n optional_op_name=optional_op_name)\n # Optimizers do not have unconditional dependencies on their slot\n # variables (nor do any other objects). They are only saved if the\n # variables they were created for are also saved.\n if slot_variable is not None:\n # If we've either made this slot variable, or if we've pulled out an\n # existing slot variable, we should restore it.\n slot_variable_position.restore(slot_variable)\n else:\n # We didn't make the slot variable. Defer restoring until it gets created\n # normally. We keep a list rather than the one with the highest restore\n # UID in case slot variables have their own dependencies, in which case\n # those could differ between restores.\n variable_key = _var_key_v2(variable)\n self._deferred_slot_restorations.setdefault(slot_name, {}).setdefault(\n variable_key, []).append(slot_variable_position)\n\n def get_slot(self, var, name):\n \"\"\"Return a slot named `name` created for `var` by the Optimizer.\n\n Some `Optimizer` subclasses use additional variables. For example\n `Momentum` and `Adagrad` use variables to accumulate updates. This method\n gives access to these `Variable` objects if for some reason you need them.\n\n Use `get_slot_names()` to get the list of slot names created by the\n `Optimizer`.\n\n Args:\n var: A variable passed to `minimize()` or `apply_gradients()`.\n name: A string.\n\n Returns:\n The `Variable` for the slot if it was created, `None` otherwise.\n \"\"\"\n named_slots = self._slots.get(name, None)\n if not named_slots:\n return None\n return named_slots.get(_var_key_v2(var), None)\n\n def get_slot_names(self):\n \"\"\"Return a list of the names of slots created by the `Optimizer`.\n\n See `get_slot()`.\n\n Returns:\n A list of strings.\n \"\"\"\n return sorted(self._slots.keys())\n\n def create_non_slot(self, initial_value, name, colocate_with=None):\n \"\"\"Add an extra variable, not associated with a slot.\"\"\"\n v = self._non_slot_dict.get(name, None)\n if v is None:\n if colocate_with is None:\n colocate_with = self._non_slot_devices\n with self._distribution.colocate_vars_with(colocate_with):\n # TODO(josh11b): Use get_variable() except for the legacy Adam use case.\n v = variable_scope.variable(initial_value, name=name, trainable=False)\n self._non_slot_dict[name] = v\n deferred_dependencies_list = self._deferred_dependencies.pop(name, ())\n for checkpoint_position in sorted(\n deferred_dependencies_list,\n key=lambda restore: restore.checkpoint.restore_uid,\n reverse=True):\n checkpoint_position.restore(v)\n return v\n\n def _restore_slot_variable(self, slot_name, variable, slot_variable):\n \"\"\"Restore a newly created slot variable's value.\"\"\"\n variable_key = _var_key_v2(variable)\n deferred_restorations = self._deferred_slot_restorations.get(\n slot_name, {}).pop(variable_key, [])\n # Iterate over restores, highest restore UID first to minimize the number\n # of assignments.\n deferred_restorations.sort(\n key=lambda position: position.restore_uid, reverse=True)\n for checkpoint_position in deferred_restorations:\n 
checkpoint_position.restore(slot_variable)\n\n def get_non_slot(self, name):\n \"\"\"Returns the non-slot variable identified by `name`.\"\"\"\n return self._non_slot_dict.get(name, None)\n\n def get_hyper(self, name, dtype=None):\n \"\"\"Returns the `name` hyper parameter, optionally cast to `dtype`.\"\"\"\n dtype_dict = self._hyper[name]\n # Do we have the value cast to dtype already cached? This should always\n # succeed when dtype is None.\n if dtype in dtype_dict:\n return dtype_dict[dtype]\n # Not cached, cast to dtype and save the result in the cache.\n result = math_ops.cast(dtype_dict[None], dtype)\n dtype_dict[dtype] = result\n return result\n\n\nclass OptimizerV2(optimizer_v1.Optimizer):\n \"\"\"Updated base class for optimizers.\n\n This class defines the API to add Ops to train a model. You never use this\n class directly, but instead instantiate one of its subclasses such as\n `GradientDescentOptimizer`, `AdagradOptimizer`, or `MomentumOptimizer`.\n\n ### Usage\n\n ```python\n # Create an optimizer with the desired parameters.\n opt = GradientDescentOptimizer(learning_rate=0.1)\n # Add Ops to the graph to minimize a cost by updating a list of variables.\n # \"cost\" is a Tensor, and the list of variables contains tf.Variable\n # objects.\n opt_op = opt.minimize(cost, var_list=<list of variables>)\n ```\n\n In the training program you will just have to run the returned Op.\n\n ```python\n # Execute opt_op to do one step of training:\n opt_op.run()\n ```\n\n ### Processing gradients before applying them.\n\n Calling `minimize()` takes care of both computing the gradients and\n applying them to the variables. If you want to process the gradients\n before applying them you can instead use the optimizer in three steps:\n\n 1. Compute the gradients with `compute_gradients()`.\n 2. Process the gradients as you wish.\n 3. Apply the processed gradients with `apply_gradients()`.\n\n Example:\n\n ```python\n # Create an optimizer.\n opt = GradientDescentOptimizer(learning_rate=0.1)\n\n # Compute the gradients for a list of variables.\n grads_and_vars = opt.compute_gradients(loss, <list of variables>)\n\n # grads_and_vars is a list of tuples (gradient, variable). Do whatever you\n # need to the 'gradient' part, for example cap them, etc.\n capped_grads_and_vars = [(MyCapper(gv[0]), gv[1]) for gv in grads_and_vars]\n\n # Ask the optimizer to apply the capped gradients.\n opt.apply_gradients(capped_grads_and_vars)\n ```\n\n ### Gating Gradients\n\n Both `minimize()` and `compute_gradients()` accept a `gate_gradients`\n argument that controls the degree of parallelism during the application of\n the gradients.\n\n The possible values are: `GATE_NONE`, `GATE_OP`, and `GATE_GRAPH`.\n\n <b>`GATE_NONE`</b>: Compute and apply gradients in parallel. This provides\n the maximum parallelism in execution, at the cost of some non-reproducibility\n in the results. For example the two gradients of `matmul` depend on the input\n values: With `GATE_NONE` one of the gradients could be applied to one of the\n inputs _before_ the other gradient is computed resulting in non-reproducible\n results.\n\n <b>`GATE_OP`</b>: For each Op, make sure all gradients are computed before\n they are used. This prevents race conditions for Ops that generate gradients\n for multiple inputs where the gradients depend on the inputs.\n\n <b>`GATE_GRAPH`</b>: Make sure all gradients for all variables are computed\n before any one of them is used. 
This provides the least parallelism but can\n be useful if you want to process all gradients before applying any of them.\n\n ### Slots\n\n Some optimizer subclasses, such as `MomentumOptimizer` and `AdagradOptimizer`\n allocate and manage additional variables associated with the variables to\n train. These are called <i>Slots</i>. Slots have names and you can ask the\n optimizer for the names of the slots that it uses. Once you have a slot name\n you can ask the optimizer for the variable it created to hold the slot value.\n\n This can be useful if you want to log debug a training algorithm, report stats\n about the slots, etc.\n\n ### Non-slot variables\n\n Some optimizer subclasses, such as `AdamOptimizer` have variables that\n are not associated with the variables to train, just the step itself.\n\n ### Hyper parameters\n\n These are arguments passed to the optimizer subclass constructor\n (the `__init__` method), and then passed to `self._set_hyper()`.\n They can be either regular Python values (like 1.0), tensors, or\n callables. If they are callable, the callable will be called during\n `apply_gradients()` to get the value for the hyper parameter.\n\n ### State\n\n Internal methods are passed a `state` argument with the correct\n values to use for the slot and non-slot variables, and the hyper\n parameters.\n \"\"\"\n\n # Values for gate_gradients.\n GATE_NONE = 0\n GATE_OP = 1\n GATE_GRAPH = 2\n\n def __init__(self, use_locking, name):\n \"\"\"Create a new Optimizer.\n\n This must be called by the constructors of subclasses.\n Note that Optimizer instances should not bind to a single graph,\n and so shouldn't keep Tensors as member variables. Generally\n you should be able to use the _set_hyper()/state.get_hyper()\n facility instead.\n\n Args:\n use_locking: Bool. If True apply use locks to prevent concurrent updates\n to variables.\n name: A non-empty string. The name to use for accumulators created\n for the optimizer.\n\n Raises:\n ValueError: If name is malformed.\n RuntimeError: If _create_slots has been overridden instead of\n _create_vars.\n \"\"\"\n # Note: We intentionally don't call parent __init__.\n\n # Optimizer._create_slots was replaced by _create_vars in OptimizerV2.\n if (self.__class__._create_slots.__code__ is not # pylint: disable=protected-access\n OptimizerV2._create_slots.__code__):\n raise RuntimeError(\n \"Override _create_vars instead of _create_slots when \"\n \"descending from OptimizerV2 (class %s)\" % self.__class__.__name__)\n if not name:\n raise ValueError(\"Must specify the optimizer name\")\n\n self._use_locking = use_locking\n self._name = name\n # Map from graph_key to state for that graph. 
We use the graph_key\n # since it works in both eager and graph mode, and gives the outer\n # graph inside functions.\n replica_context = distribute_ctx.get_replica_context()\n if replica_context is None:\n # In a cross-replica context for a DistributionStrategy, which means\n # only one Optimizer will be created, not one per replica.\n self._per_graph_state = {}\n else:\n # We use get_replica_context().merge_call() to get a single dict\n # shared across all model replicas when running with a\n # DistributionStrategy.\n self._per_graph_state = replica_context.merge_call(lambda _: {})\n\n # Hyper parameters, and whether they should be re-evaluated every step.\n self._hyper = {}\n\n def _set_hyper(self, name, value):\n self._hyper[name] = (_is_dynamic(value), value)\n\n def minimize(self,\n loss,\n global_step=None,\n var_list=None,\n gate_gradients=GATE_OP,\n aggregation_method=None,\n colocate_gradients_with_ops=False,\n name=None,\n grad_loss=None,\n stop_gradients=None,\n scale_loss_by_num_replicas=None):\n \"\"\"Add operations to minimize `loss` by updating `var_list`.\n\n This method simply combines calls `compute_gradients()` and\n `apply_gradients()`. If you want to process the gradient before applying\n them call `compute_gradients()` and `apply_gradients()` explicitly instead\n of using this function.\n\n Args:\n loss: A `Tensor` containing the value to minimize.\n global_step: Optional `Variable` to increment by one after the variables\n have been updated.\n var_list: Optional list or tuple of `Variable` objects to update to\n minimize `loss`. Defaults to the list of variables collected in the\n graph under the key `GraphKeys.TRAINABLE_VARIABLES`.\n gate_gradients: How to gate the computation of gradients. Can be\n `GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.\n aggregation_method: Specifies the method used to combine gradient terms.\n Valid values are defined in the class `AggregationMethod`.\n colocate_gradients_with_ops: If True, try colocating gradients with the\n corresponding op.\n name: Optional name for the returned operation.\n grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.\n stop_gradients: Optional. A Tensor or list of tensors not to differentiate\n through.\n scale_loss_by_num_replicas: Optional boolean. If true, scale the loss down\n by the number of replicas. By default, auto-detects whether this is\n needed.\n\n Returns:\n An Operation that updates the variables in `var_list`. If `global_step`\n was not `None`, that operation also increments `global_step`.\n\n Raises:\n ValueError: If some of the variables are not `Variable` objects.\n\n @compatibility(eager)\n When eager execution is enabled, `loss` should be a Python function that\n takes elements of `var_list` as arguments and computes the value to be\n minimized. 
If `var_list` is None, `loss` should take no arguments.\n Minimization (and gradient computation) is done with respect to the\n elements of `var_list` if not None, else with respect to any trainable\n variables created during the execution of the `loss` function.\n `gate_gradients`, `aggregation_method`, `colocate_gradients_with_ops` and\n `grad_loss` are ignored when eager execution is enabled.\n @end_compatibility\n \"\"\"\n grads_and_vars = self.compute_gradients(\n loss,\n var_list=var_list,\n gate_gradients=gate_gradients,\n aggregation_method=aggregation_method,\n colocate_gradients_with_ops=colocate_gradients_with_ops,\n grad_loss=grad_loss,\n stop_gradients=stop_gradients,\n scale_loss_by_num_replicas=scale_loss_by_num_replicas)\n\n vars_with_grad = [v for g, v in grads_and_vars if g is not None]\n if not vars_with_grad:\n raise ValueError(\n \"No gradients provided for any variable, check your graph for ops\"\n \" that do not support gradients, between variables %s and loss %s.\" %\n ([str(v) for _, v in grads_and_vars], loss))\n\n return self.apply_gradients(\n grads_and_vars, global_step=global_step, name=name)\n\n def compute_gradients(self,\n loss,\n var_list=None,\n gate_gradients=GATE_OP,\n aggregation_method=None,\n colocate_gradients_with_ops=False,\n grad_loss=None,\n stop_gradients=None,\n scale_loss_by_num_replicas=None):\n \"\"\"Compute gradients of `loss` for the variables in `var_list`.\n\n This is the first part of `minimize()`. It returns a list\n of (gradient, variable) pairs where \"gradient\" is the gradient\n for \"variable\". Note that \"gradient\" can be a `Tensor`, an\n `IndexedSlices`, or `None` if there is no gradient for the\n given variable.\n\n Args:\n loss: A Tensor containing the value to minimize or a callable taking no\n arguments which returns the value to minimize. When eager execution is\n enabled it must be a callable.\n var_list: Optional list or tuple of `tf.Variable` to update to minimize\n `loss`. Defaults to the list of variables collected in the graph under\n the key `GraphKeys.TRAINABLE_VARIABLES`.\n gate_gradients: How to gate the computation of gradients. Can be\n `GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.\n aggregation_method: Specifies the method used to combine gradient terms.\n Valid values are defined in the class `AggregationMethod`.\n colocate_gradients_with_ops: If True, try colocating gradients with the\n corresponding op.\n grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.\n stop_gradients: Optional. A Tensor or list of tensors not to differentiate\n through.\n scale_loss_by_num_replicas: Optional boolean. If true, scale the loss down\n by the number of replicas. By default, auto-detects whether this is\n needed.\n\n Returns:\n A list of (gradient, variable) pairs. Variable is always present, but\n gradient can be `None`.\n\n Raises:\n TypeError: If `var_list` contains anything else than `Variable` objects.\n ValueError: If some arguments are invalid.\n RuntimeError: If called with eager execution enabled and `loss` is\n not callable.\n\n @compatibility(eager)\n When eager execution is enabled, `gate_gradients`, `aggregation_method`,\n and `colocate_gradients_with_ops` are ignored.\n @end_compatibility\n \"\"\"\n # TODO(josh11b): Test that we handle weight decay in a reasonable way.\n if callable(loss):\n with backprop.GradientTape() as tape:\n if var_list is not None:\n tape.watch(var_list)\n loss_value = loss()\n\n # Scale loss for number of replicas (callable-loss case). 
In this case,\n # we have to be careful to call distribute_lib.get_loss_reduction()\n # *after* loss() is evaluated, so we know what loss reduction it uses.\n if scale_loss_by_num_replicas is None:\n scale_loss_by_num_replicas = (\n distribute_lib.get_loss_reduction() == variable_scope\n .VariableAggregation.MEAN)\n if scale_loss_by_num_replicas:\n num_replicas = distribute_ctx.get_distribution_strategy().num_replicas\n if num_replicas > 1:\n loss_value *= 1. / num_replicas\n\n if var_list is None:\n var_list = tape.watched_variables()\n grads = tape.gradient(loss_value, var_list, grad_loss)\n return list(zip(grads, var_list))\n if context.executing_eagerly():\n raise RuntimeError(\"`loss` passed to Optimizer.compute_gradients should \"\n \"be a function when eager execution is enabled.\")\n\n # Scale loss for number of replicas (non-callable-loss case).\n if scale_loss_by_num_replicas is None:\n scale_loss_by_num_replicas = (\n distribute_lib.get_loss_reduction() == variable_scope\n .VariableAggregation.MEAN)\n if scale_loss_by_num_replicas:\n num_replicas = distribute_ctx.get_distribution_strategy().num_replicas\n if num_replicas > 1:\n loss *= 1. / num_replicas\n\n if gate_gradients not in [\n optimizer_v1.Optimizer.GATE_NONE, optimizer_v1.Optimizer.GATE_OP,\n optimizer_v1.Optimizer.GATE_GRAPH\n ]:\n raise ValueError(\n \"gate_gradients must be one of: Optimizer.GATE_NONE, \"\n \"Optimizer.GATE_OP, Optimizer.GATE_GRAPH. Not %s\" % gate_gradients)\n self._assert_valid_dtypes([loss])\n if grad_loss is not None:\n self._assert_valid_dtypes([grad_loss])\n if var_list is None:\n var_list = (\n variables.trainable_variables() + ops.get_collection(\n ops.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))\n else:\n var_list = nest.flatten(var_list)\n # pylint: disable=protected-access\n var_list += ops.get_collection(ops.GraphKeys._STREAMING_MODEL_PORTS)\n # pylint: enable=protected-access\n processors = [_get_processor(v) for v in var_list]\n if not var_list:\n raise ValueError(\"No variables to optimize.\")\n var_refs = [p.target() for p in processors]\n grads = gradients.gradients(\n loss,\n var_refs,\n grad_ys=grad_loss,\n gate_gradients=(gate_gradients == optimizer_v1.Optimizer.GATE_OP),\n aggregation_method=aggregation_method,\n colocate_gradients_with_ops=colocate_gradients_with_ops,\n stop_gradients=stop_gradients)\n if gate_gradients == optimizer_v1.Optimizer.GATE_GRAPH:\n grads = control_flow_ops.tuple(grads)\n grads_and_vars = list(zip(grads, var_list))\n self._assert_valid_dtypes([\n v for g, v in grads_and_vars\n if g is not None and v.dtype != dtypes.resource\n ])\n return grads_and_vars\n\n def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n \"\"\"Apply gradients to variables.\n\n This is the second part of `minimize()`. It returns an `Operation` that\n applies gradients.\n\n Args:\n grads_and_vars: List of (gradient, variable) pairs as returned by\n `compute_gradients()`.\n global_step: Optional `Variable` to increment by one after the variables\n have been updated.\n name: Optional name for the returned operation. Default to the name\n passed to the `Optimizer` constructor.\n\n Returns:\n An `Operation` that applies the specified gradients. If `global_step`\n was not None, that operation also increments `global_step`.\n\n Raises:\n TypeError: If `grads_and_vars` is malformed.\n ValueError: If none of the variables have gradients.\n \"\"\"\n # This is a default implementation of apply_gradients() that can be shared\n # by most optimizers. 
It relies on the subclass implementing the following\n # methods: _create_vars(), _prepare(), _apply_dense(), and _apply_sparse().\n\n # Filter out variables with gradients of `None`.\n grads_and_vars = tuple(grads_and_vars) # Make sure repeat iteration works.\n if not grads_and_vars:\n raise ValueError(\"No variables provided.\")\n filtered = tuple((g, v) for (g, v) in grads_and_vars if g is not None)\n if not filtered:\n raise ValueError(\"No gradients provided for any variable: %s.\" %\n ([str(v) for _, v in grads_and_vars],))\n return distribute_ctx.get_replica_context().merge_call(\n self._distributed_apply, filtered, global_step=global_step, name=name)\n\n def _get_or_create_state(self, var_list=None):\n \"\"\"Either looks up or creates `_OptimizerV2State`.\n\n If any variables are available, they should be passed via the `var_list`\n argument, and these will be used to determine the graph to create/retrieve\n state for. Otherwise the returned state is for the current default graph.\n\n Args:\n var_list: A list of variables to extract a graph from.\n\n Returns:\n An `_OptimizerV2State` object.\n \"\"\"\n # Determine the graph_key from the current graph.\n eager_execution = context.executing_eagerly()\n if eager_execution or var_list is None:\n graph = ops.get_default_graph()\n else:\n graph = ops._get_graph_from_inputs(var_list) # pylint: disable=protected-access\n assert graph is not None\n graph_key = graph._graph_key # pylint: disable=protected-access\n\n # Get the per graph state by looking up the graph_key.\n if graph_key in self._per_graph_state:\n per_graph_state = self._per_graph_state[graph_key]\n else:\n per_graph_state = _OptimizerV2State(self._name)\n per_graph_state._init_with_static_hyper(self._hyper) # pylint: disable=protected-access\n self._per_graph_state[graph_key] = per_graph_state\n return per_graph_state\n\n def _distributed_apply(self, distribution, grads_and_vars, global_step, name):\n \"\"\"`apply_gradients` for use with a `DistributionStrategy`.\"\"\"\n reduced_grads = distribution.batch_reduce(\n variable_scope.VariableAggregation.SUM, grads_and_vars)\n var_list = [v for _, v in grads_and_vars]\n grads_and_vars = zip(reduced_grads, var_list)\n\n unwrapped_var_list = [x for v in var_list for x in distribution.unwrap(v)]\n eager_execution = context.executing_eagerly()\n if eager_execution:\n # Give a clear error in this case instead of \"name not supported\n # for Eager Tensors\" when we compute non_slot_devices.\n for v in unwrapped_var_list:\n if isinstance(v, ops.Tensor):\n raise NotImplementedError(\"Trying to update a Tensor \", v)\n\n with ops.name_scope(name, self._name) as name:\n per_graph_state = self._get_or_create_state(var_list=unwrapped_var_list)\n # Include the current value of any dynamic hyper parameters in `state`.\n non_slot_devices = distribution.non_slot_devices(var_list)\n state = per_graph_state._copy_with_dynamic_hyper( # pylint: disable=protected-access\n self._hyper, distribution, non_slot_devices)\n\n # Create any slot and non-slot variables we need in `state`.\n with ops.init_scope():\n self._create_vars(var_list, state)\n\n with ops.name_scope(name): # Re-enter name_scope created above\n # Give the child class a chance to do something before we start\n # applying gradients.\n self._prepare(state)\n\n def update(v, g):\n \"\"\"Update variable `v` using gradient `g`.\"\"\"\n assert v is not None\n\n # Convert the grad to Tensor or IndexedSlices if necessary, and\n # look up a processor for each variable's type.\n try:\n g = 
ops.convert_to_tensor_or_indexed_slices(g)\n except TypeError:\n raise TypeError(\"Gradient must be convertible to a Tensor\"\n \" or IndexedSlices, or None: %s\" % g)\n if not isinstance(g, (ops.Tensor, ops.IndexedSlices)):\n raise TypeError(\n \"Gradient must be a Tensor, IndexedSlices, or None: %s\" % g)\n processor = _get_processor(v)\n\n # We colocate all ops created in _apply_dense or _apply_sparse\n # on the same device as the variable.\n # TODO(apassos): figure out how to get the variable name here.\n scope_name = \"\" if eager_execution else v.op.name\n # device_policy is set because non-mirrored tensors will be read in\n # `update_op`.\n # TODO(josh11b): Make different state objects for each device to\n # avoid needing to set the device_policy.\n device_policy = context.context().device_policy(\n context.DEVICE_PLACEMENT_SILENT)\n with ops.name_scope(\"update_\" + scope_name), device_policy:\n return processor.update_op(self, g, state)\n\n # Use the processors to update the variables.\n update_ops = []\n for grad, var in grads_and_vars:\n update_ops.extend(distribution.update(var, update, grad, grouped=False))\n\n # Give the child class a chance to do something after applying\n # gradients\n def finish():\n # TODO(josh11b): Make different state objects for each device to\n # avoid needing to set the device_policy.\n with context.context().device_policy(context.DEVICE_PLACEMENT_SILENT):\n return self._finish(state)\n\n update_ops = control_flow_ops.group(update_ops)\n with ops.control_dependencies([update_ops]):\n finish_updates = distribution.update_non_slot(\n non_slot_devices, finish, grouped=False)\n # We said grouped=False, which means finish_updates is always a list.\n # It will be [None] when finish() returns None.\n if finish_updates == [None]:\n finish_updates = [update_ops]\n\n # Update `global_step` (if any).\n if global_step is None:\n apply_updates = distribution.group(finish_updates, name=name)\n else:\n with ops.control_dependencies(finish_updates):\n\n def update_global_step(global_step, name):\n return global_step.assign_add(1, read_value=False, name=name)\n\n apply_updates = distribution.update(global_step, update_global_step,\n name)\n\n # Add the training op to the TRAIN_OP graph collection in graph mode.\n if not eager_execution:\n if isinstance(apply_updates, ops.Tensor):\n apply_updates = apply_updates.op\n train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)\n if apply_updates not in train_op:\n train_op.append(apply_updates)\n\n return apply_updates\n\n def get_slot(self, var, name):\n \"\"\"Return a slot named `name` created for `var` by the Optimizer.\n\n Some `Optimizer` subclasses use additional variables. For example\n `Momentum` and `Adagrad` use variables to accumulate updates. 
This method\n gives access to these `Variable` objects if for some reason you need them.\n\n Use `get_slot_names()` to get the list of slot names created by the\n `Optimizer`.\n\n Args:\n var: A variable passed to `minimize()` or `apply_gradients()`.\n name: A string.\n\n Returns:\n The `Variable` for the slot if it was created, `None` otherwise.\n \"\"\"\n state = self._get_state_for_var(var)\n return state.get_slot(var, name) if state is not None else None\n\n def get_slot_names(self):\n \"\"\"Return a list of the names of slots created by the `Optimizer`.\n\n See `get_slot()`.\n\n Returns:\n A list of strings.\n \"\"\"\n state = self._get_per_graph_state()\n return state.get_slot_names() if state is not None else []\n\n def variables(self):\n \"\"\"A list of variables which encode the current state of `Optimizer`.\n\n Includes slot variables and additional global variables created by the\n optimizer in the current default graph.\n\n Returns:\n A list of variables.\n \"\"\"\n state = self._get_per_graph_state()\n return state._variables() if state is not None else [] # pylint: disable=protected-access\n\n # --------------\n # Methods to be implemented by subclasses if they want to use the\n # inherited implementation of apply_gradients() or compute_gradients().\n # --------------\n def _create_vars(self, var_list, state):\n \"\"\"Create all slots needed by the variables and any non-slot variables.\n\n Args:\n var_list: A list of `Variable` objects.\n state: An object with these methods: `create_slot(var, val, slot_name,\n optional_op_name)`, `create_slot_with_initializer(` `var, initializer,\n shape, dtype, slot_name, optional_op_name)`, `zeros_slot(var, slot_name,\n optional_op_name)`, `create_non_slot_variable(initial_value, name,\n colocate_with)`, `get_hyper(name)`\n \"\"\"\n # No slots needed by default\n pass\n\n def _prepare(self, state):\n \"\"\"Code to execute before applying gradients.\n\n Note that most uses of _prepare() in Optimizer have been subsumed\n by explicit support for hyper parameters in OptimizerV2\n\n Args:\n state: An object with a `get_hyper(name)` method.\n\n Returns:\n Return value will be ignored.\n \"\"\"\n pass\n\n def _apply_dense(self, grad, var, state):\n \"\"\"Add ops to apply dense gradients to `var`.\n\n Args:\n grad: A `Tensor`.\n var: A `Variable` object.\n state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,\n and `get_hyper(name)` methods.\n\n Returns:\n An `Operation`.\n \"\"\"\n raise NotImplementedError()\n\n def _resource_apply_dense(self, grad, handle, state):\n \"\"\"Add ops to apply dense gradients to the variable `handle`.\n\n Args:\n grad: a `Tensor` representing the gradient.\n handle: a `Tensor` of dtype `resource` which points to the variable to be\n updated.\n state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,\n and `get_hyper(name)` methods.\n\n Returns:\n An `Operation` which updates the value of the variable.\n \"\"\"\n raise NotImplementedError()\n\n def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices,\n state):\n \"\"\"Add ops to apply sparse gradients to `handle`, with repeated indices.\n\n Optimizers which override this method must deal with repeated indices. See\n the docstring of `_apply_sparse_duplicate_indices` for details. By default\n the correct behavior, to sum non-unique indices and their associated\n gradients, is enforced by first pre-processing `grad` and `indices` and\n passing them on to `_resource_apply_sparse`. 
Optimizers which deal correctly\n with duplicate indices may instead override this method to avoid the\n overhead of summing.\n\n Args:\n grad: a `Tensor` representing the gradient for the affected indices.\n handle: a `Tensor` of dtype `resource` which points to the variable to be\n updated.\n indices: a `Tensor` of integral type representing the indices for which\n the gradient is nonzero. Indices may be repeated.\n state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,\n and `get_hyper(name)` methods.\n\n Returns:\n An `Operation` which updates the value of the variable.\n \"\"\"\n # pylint: disable=protected-access\n summed_grad, unique_indices = optimizer_v1._deduplicate_indexed_slices(\n values=grad, indices=indices)\n # pylint: enable=protected-access\n return self._resource_apply_sparse(summed_grad, handle, unique_indices,\n state)\n\n def _resource_apply_sparse(self, grad, handle, indices, state):\n \"\"\"Add ops to apply sparse gradients to the variable `handle`.\n\n Similar to `_apply_sparse`, the `indices` argument to this method has been\n de-duplicated. Optimizers which deal correctly with non-unique indices may\n instead override `_resource_apply_sparse_duplicate_indices` to avoid this\n overhead.\n\n Args:\n grad: a `Tensor` representing the gradient for the affected indices.\n handle: a `Tensor` of dtype `resource` which points to the variable to be\n updated.\n indices: a `Tensor` of integral type representing the indices for which\n the gradient is nonzero. Indices are unique.\n state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,\n and `get_hyper(name)` methods.\n\n Returns:\n An `Operation` which updates the value of the variable.\n \"\"\"\n raise NotImplementedError()\n\n def _apply_sparse_duplicate_indices(self, grad, var, state):\n \"\"\"Add ops to apply sparse gradients to `var`, with repeated sparse indices.\n\n Optimizers which override this method must deal with IndexedSlices objects\n such as the following:\n\n IndexedSlicesValue(values=[1, 1], indices=[0, 0], dense_shape=[1])\n\n The correct interpretation is:\n\n IndexedSlicesValue(values=[2], indices=[0], dense_shape=[1])\n\n Many optimizers deal incorrectly with repeated indices when updating based\n on sparse gradients (e.g. summing squares rather than squaring the sum, or\n applying momentum terms multiple times). 
Adding first is always the correct\n behavior, so this is enforced here by reconstructing the IndexedSlices to\n have only unique indices, then calling _apply_sparse.\n\n Optimizers which deal correctly with repeated indices may instead override\n this method to avoid the overhead of summing indices.\n\n Args:\n grad: `IndexedSlices`.\n var: A `Variable` object.\n state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,\n and `get_hyper(name)` methods.\n\n Returns:\n An `Operation`.\n \"\"\"\n # pylint: disable=protected-access\n summed_values, unique_indices = optimizer_v1._deduplicate_indexed_slices(\n values=grad.values, indices=grad.indices)\n # pylint: enable=protected-access\n gradient_no_duplicate_indices = ops.IndexedSlices(\n indices=unique_indices,\n values=summed_values,\n dense_shape=grad.dense_shape)\n return self._apply_sparse(gradient_no_duplicate_indices, var, state)\n\n def _apply_sparse(self, grad, var, state):\n \"\"\"Add ops to apply sparse gradients to `var`.\n\n The IndexedSlices object passed to `grad` in this function is by default\n pre-processed in `_apply_sparse_duplicate_indices` to remove duplicate\n indices (see its docstring for details). Optimizers which can tolerate or\n have correct special cases for duplicate sparse indices may override\n `_apply_sparse_duplicate_indices` instead of this function, avoiding that\n overhead.\n\n Args:\n grad: `IndexedSlices`, with no repeated indices.\n var: A `Variable` object.\n state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,\n and `get_hyper(name)` methods.\n\n Returns:\n An `Operation`.\n \"\"\"\n raise NotImplementedError()\n\n def _finish(self, state):\n \"\"\"Do what is needed to finish the update.\n\n This is called inside a scope colocated with any non-slot variables.\n\n Args:\n state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,\n and `get_hyper(name)` methods.\n\n Returns:\n The operation to apply updates, or None if no updates.\n \"\"\"\n return None\n\n # --------------\n # Utility methods for subclasses.\n # --------------\n def _get_per_graph_state(self):\n # pylint: disable=protected-access\n return self._per_graph_state.get(ops.get_default_graph()._graph_key, None)\n\n def _get_state_for_var(self, var):\n # pylint: disable=protected-access\n return self._per_graph_state.get(var._graph_key, None)\n\n # --------------\n # Overridden methods from Checkpointable.\n # --------------\n\n def _track_checkpointable(self, *args, **kwargs):\n \"\"\"Optimizers may not track dependencies. Raises an error.\"\"\"\n raise NotImplementedError(\n \"Optimizers may not have dependencies. File a feature request if this \"\n \"limitation bothers you.\")\n\n @property\n def _checkpoint_dependencies(self):\n \"\"\"From Checkpointable. Gather graph-specific non-slot variables to save.\"\"\"\n current_graph_non_slot_variables = []\n state = self._get_per_graph_state()\n if state is not None:\n for name, variable_object in sorted(\n state._non_slot_dict.items(), # pylint: disable=protected-access\n # Avoid comparing variables\n key=lambda item: item[0]):\n current_graph_non_slot_variables.append(\n checkpointable.CheckpointableReference(\n name=name, ref=variable_object))\n # Note: ignores super(); Optimizers may not have any dependencies outside of\n # state objects.\n return current_graph_non_slot_variables\n\n def _lookup_dependency(self, name):\n \"\"\"From Checkpointable. 
Find a non-slot variable in the current graph.\"\"\"\n state = self._get_per_graph_state()\n if state is None:\n return None\n else:\n return state.get_non_slot(name)\n\n @property\n def _deferred_dependencies(self):\n \"\"\"Lets Checkpointable know where non-slot variables are created.\n\n If necessary, creates a new state object for the current default graph.\n Checkpointable will then add entries to that state's deferred dependency\n dictionary. The state object will check that dictionary when creating\n non-slot variables, restoring their value if an entry is found.\n\n Returns:\n A dictionary which holds deferred dependencies for the current default\n graph.\n \"\"\"\n state = self._get_or_create_state()\n return state._deferred_dependencies # pylint: disable=protected-access\n\n def _create_or_restore_slot_variable(self, slot_variable_position, slot_name,\n variable):\n \"\"\"Checkpointable: Restore a slot variable's value, possibly creating it.\n\n Called when a variable which has an associated slot variable is created or\n restored.\n\n Args:\n slot_variable_position: A `checkpointable._CheckpointPosition` object\n indicating the slot variable `Checkpointable` object to be restored.\n slot_name: The name of this `Optimizer`'s slot to restore into.\n variable: The variable object this slot is being created for.\n \"\"\"\n state = self._get_or_create_state(var_list=[variable])\n state._create_or_restore_slot_variable( # pylint: disable=protected-access\n slot_variable_position=slot_variable_position,\n slot_name=slot_name,\n variable=variable,\n optional_op_name=self._name)\n\n # --------------\n # Unsupported parent methods\n # --------------\n def _slot_dict(self, slot_name):\n raise NotImplementedError(\"_slot_dict() method unsupported in OptimizerV2\")\n\n def _get_or_make_slot(self, var, val, slot_name, op_name):\n raise NotImplementedError(\n \"_get_or_make_slot() method unsupported in OptimizerV2\")\n\n def _get_or_make_slot_with_initializer(self, var, initializer, shape, dtype,\n slot_name, op_name):\n raise NotImplementedError(\n \"_get_or_make_slot_with_initializer() method unsupported in \"\n \"OptimizerV2\")\n\n def _create_non_slot_variable(self, initial_value, name, colocate_with):\n raise NotImplementedError(\n \"_create_non_slot_variable() method unsupported in OptimizerV2\")\n\n def _get_non_slot_variable(self, name, graph=None):\n raise NotImplementedError(\n \"_get_non_slot_variable() method unsupported in OptimizerV2\")\n\n def _non_slot_variables(self):\n raise NotImplementedError(\n \"_non_slot_variables() method unsupported in OptimizerV2\")\n" ]
[ [ "tensorflow.python.ops.variable_scope.variable", "tensorflow.python.framework.ops.convert_to_tensor_or_indexed_slices", "tensorflow.python.training.checkpointable.base.CheckpointableReference", "tensorflow.python.util.nest.flatten", "tensorflow.python.training.distribution_strategy_context.get_distribution_strategy", "tensorflow.python.ops.control_flow_ops.tuple", "tensorflow.python.training.checkpointable.base.CheckpointInitialValue", "tensorflow.python.training.slot_creator.create_zeros_slot", "tensorflow.python.framework.ops.IndexedSlices", "tensorflow.python.eager.backprop.GradientTape", "tensorflow.python.ops.control_flow_ops.group", "tensorflow.python.ops.gradients.gradients", "tensorflow.python.eager.context.context", "tensorflow.python.training.slot_creator.create_slot_with_initializer", "tensorflow.python.ops.variables.trainable_variables", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.framework.ops._get_graph_from_inputs", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.framework.ops.init_scope", "tensorflow.python.training.distribute.get_loss_reduction", "tensorflow.python.training.distribution_strategy_context.get_replica_context", "tensorflow.python.training.optimizer._deduplicate_indexed_slices", "tensorflow.python.training.slot_creator.create_slot", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.framework.ops.get_collection", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.framework.ops.get_collection_ref" ] ]
DS3Lab/LambdaML
[ "0afca7819e08632ba116fec8e102084e4040a47a" ]
[ "archived/functions/higgs/SVM_ADMM_reduce.py" ]
[ "import time\r\nimport numpy as np\r\n\r\nimport torch\r\nfrom torch.autograd import Variable\r\nfrom torch.utils.data.sampler import SubsetRandomSampler\r\n\r\nfrom archived.s3.get_object import get_object\r\nfrom archived.s3 import clear_bucket\r\nfrom archived.sync import reduce_epoch, delete_expired_merged_epoch\r\n\r\nfrom archived.old_model.SVM import SVM\r\nfrom data_loader.libsvm_dataset import DenseDatasetWithLines\r\n\r\n# lambda setting\r\n# file_bucket = \"s3-libsvm\"\r\n# tmp_bucket = \"tmp-grads\"\r\n# merged_bucket = \"merged-params\"\r\nlocal_dir = \"/tmp\"\r\n\r\n# algorithm setting\r\nnum_features = 30\r\nnum_classes = 2\r\nlearning_rate = 0.01\r\nbatch_size = 300\r\nnum_epochs = 10\r\nnum_admm_epochs = 30\r\nvalidation_ratio = .2\r\nshuffle_dataset = True\r\nrandom_seed = 42\r\nep_abs=1e-4\r\nep_rel=1e-2\r\n\r\n\r\ndef initialize_z_and_u(shape):\r\n z = np.random.rand(shape[0], shape[1]).astype(np.float)\r\n u = np.random.rand(shape[0], shape[1]).astype(np.float)\r\n return z, u\r\n\r\n\r\ndef update_z_u(w, z, u, rho, n, lam_0):\r\n z_new = w + u\r\n z_tem = abs(z_new) - lam_0 / float(n * rho)\r\n z_new = np.sign(z_new) * z_tem * (z_tem > 0)\r\n\r\n s = z_new - z\r\n r = w - np.ones(w.shape[0] * w.shape[1]).astype(np.float).reshape(w.shape) * z_new\r\n u_new = u + r\r\n return z_new, s, r, s\r\n\r\n\r\ndef update_z(w, u, rho, n, lam_0):\r\n z_new = w + u\r\n z_tem = abs(z_new) - lam_0 / float(n * rho)\r\n z_new = np.sign(z_new) * z_tem * (z_tem > 0)\r\n return z_new\r\n\r\n\r\ndef check_stop(ep_abs, ep_rel, r, s, n, p, w, z, u, rho):\r\n e_pri = (n*p)**(0.5) * ep_abs + ep_rel * (max(np.sum(w**2),np.sum(n*z**2)))**(0.5)\r\n e_dual = (p)**(0.5) * ep_abs + ep_rel * rho * (np.sum(u**2))**(0.5)/(n)**(0.5)\r\n print(\"r^2 = {}, s^2 = {}, e_pri = {}, e_dual = {}\".\r\n format(np.sum(r**2), e_pri, np.sum(s**2), e_dual))\r\n stop = (np.sum(r**2) <= e_pri**2) & (np.sum(s**2) <= e_dual**2)\r\n return(stop)\r\n\r\n\r\ndef handler(event, context):\r\n start_time = time.time()\r\n bucket = event['bucket_name']\r\n worker_index = event['rank']\r\n num_workers = event['num_workers']\r\n key = event['file']\r\n tmp_bucket = event['tmp_bucket']\r\n merged_bucket = event['merged_bucket']\r\n num_epochs = event['num_epochs']\r\n num_admm_epochs = event['num_admm_epochs']\r\n learning_rate = event['learning_rate']\r\n lam = event['lambda']\r\n rho = event['rho']\r\n batch_size = event['batch_size']\r\n\r\n print('bucket = {}'.format(bucket))\r\n print(\"file = {}\".format(key))\r\n print('number of workers = {}'.format(num_workers))\r\n print('worker index = {}'.format(worker_index))\r\n print('tmp bucket = {}'.format(tmp_bucket))\r\n print('merge bucket = {}'.format(merged_bucket))\r\n print('num epochs = {}'.format(num_epochs))\r\n print('num admm epochs = {}'.format(num_admm_epochs))\r\n print('learning rate = {}'.format(learning_rate))\r\n print(\"lambda = {}\".format(lam))\r\n print(\"rho = {}\".format(rho))\r\n print(\"batch_size = {}\".format(batch_size))\r\n\r\n # read file from s3\r\n file = get_object(bucket, key).read().decode('utf-8').split(\"\\n\")\r\n print(\"read data cost {} s\".format(time.time() - start_time))\r\n # file_path = \"../../dataset/agaricus_127d_train.libsvm\"\r\n # file = open(file_path).readlines()\r\n\r\n parse_start = time.time()\r\n dataset = DenseDatasetWithLines(file, num_features)\r\n print(\"parse data cost {} s\".format(time.time() - parse_start))\r\n\r\n preprocess_start = time.time()\r\n # Creating data indices for training and validation 
splits:\r\n dataset_size = len(dataset)\r\n\r\n indices = list(range(dataset_size))\r\n split = int(np.floor(validation_ratio * dataset_size))\r\n if shuffle_dataset:\r\n np.random.seed(random_seed)\r\n np.random.shuffle(indices)\r\n train_indices, val_indices = indices[split:], indices[:split]\r\n\r\n # Creating PT data samplers and loaders:\r\n train_sampler = SubsetRandomSampler(train_indices)\r\n valid_sampler = SubsetRandomSampler(val_indices)\r\n\r\n train_loader = torch.utils.data.DataLoader(dataset,\r\n batch_size=batch_size,\r\n sampler=train_sampler)\r\n validation_loader = torch.utils.data.DataLoader(dataset,\r\n batch_size=batch_size,\r\n sampler=valid_sampler)\r\n\r\n print(\"preprocess data cost {} s, dataset size = {}\"\r\n .format(time.time() - preprocess_start, dataset_size))\r\n\r\n model = SVM(num_features, num_classes).float()\r\n print(\"size of w = {}\".format(model.linear.weight.data.size()))\r\n\r\n z, u = initialize_z_and_u(model.linear.weight.data.size())\r\n print(\"size of z = {}\".format(z.shape))\r\n print(\"size of u = {}\".format(u.shape))\r\n\r\n # Loss and Optimizer\r\n # Softmax is internally computed.\r\n # Set parameters to be updated.\r\n optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)\r\n\r\n # Training the Model\r\n train_start = time.time()\r\n stop = False\r\n for admm_epoch in range(num_admm_epochs):\r\n print(\"ADMM Epoch >>> {}\".format(admm_epoch))\r\n for epoch in range(num_epochs):\r\n epoch_start = time.time()\r\n epoch_loss = 0\r\n for batch_index, (items, labels) in enumerate(train_loader):\r\n # print(\"------worker {} epoch {} batch {}------\".format(worker_index, epoch, batch_index))\r\n batch_start = time.time()\r\n items = Variable(items.view(-1, num_features))\r\n labels = Variable(labels)\r\n\r\n # Forward + Backward + Optimize\r\n optimizer.zero_grad()\r\n outputs = model(items)\r\n\r\n classify_loss = torch.mean(torch.clamp(1 - outputs.t() * labels.float(), min=0)) # hinge loss\r\n epoch_loss += classify_loss\r\n\r\n u_z = torch.from_numpy(u).float() - torch.from_numpy(z).float()\r\n loss = classify_loss\r\n for name, param in model.named_parameters():\r\n if name.split('.')[-1] == \"weight\":\r\n loss += rho / 2.0 * torch.norm(param + u_z, p=2)\r\n #loss = classify_loss + rho / 2.0 * torch.norm(torch.sum(model.linear.weight, u_z))\r\n optimizer.zero_grad()\r\n loss.backward(retain_graph=True)\r\n optimizer.step()\r\n\r\n train_time = time.time() - epoch_start\r\n\r\n # Test the Model\r\n test_start = time.time()\r\n correct = 0\r\n total = 0\r\n test_loss = 0\r\n for items, labels in validation_loader:\r\n items = Variable(items.view(-1, num_features))\r\n labels = Variable(labels)\r\n outputs = model(items)\r\n test_loss += torch.mean(torch.clamp(1 - outputs.t() * labels.float(), min=0))\r\n _, predicted = torch.max(outputs.data, 1)\r\n total += labels.size(0)\r\n correct += (predicted == labels).sum()\r\n test_time = time.time() - test_start\r\n\r\n print('Epoch: [%d/%d], Step: [%d/%d], Time: %.4f, Loss: %.4f, epoch cost %.4f, '\r\n 'train cost %.4f s, test cost %.4f s: '\r\n 'accuracy of the model on the %d test samples: %d %%, test loss = %f'\r\n % (epoch + 1, num_epochs, batch_index + 1, len(train_indices) / batch_size,\r\n time.time() - train_start, epoch_loss.data, time.time() - epoch_start,\r\n train_time, test_time, len(val_indices), 100 * correct / total, test_loss / total))\r\n\r\n w = model.linear.weight.data.numpy()\r\n w_shape = w.shape\r\n b = model.linear.bias.data.numpy()\r\n b_shape = 
b.shape\r\n u_shape = u.shape\r\n\r\n w_and_b = np.concatenate((w.flatten(), b.flatten()))\r\n u_w_b = np.concatenate((u.flatten(), w_and_b.flatten()))\r\n cal_time = time.time() - epoch_start\r\n print(\"Epoch {} calculation cost = {} s\".format(epoch, cal_time))\r\n\r\n sync_start = time.time()\r\n postfix = \"{}\".format(admm_epoch)\r\n u_w_b_merge = reduce_epoch(u_w_b, tmp_bucket, merged_bucket, num_workers, worker_index, postfix)\r\n\r\n u_mean = u_w_b_merge[:u_shape[0] * u_shape[1]].reshape(u_shape) / float(num_workers)\r\n w_mean = u_w_b_merge[u_shape[0]*u_shape[1] : u_shape[0]*u_shape[1]+w_shape[0]*w_shape[1]].reshape(w_shape) / float(num_workers)\r\n b_mean = u_w_b_merge[u_shape[0]*u_shape[1]+w_shape[0]*w_shape[1]:].reshape(b_shape[0]) / float(num_workers)\r\n #model.linear.weight.data = torch.from_numpy(w)\r\n model.linear.bias.data = torch.from_numpy(b_mean).float()\r\n sync_time = time.time() - sync_start\r\n print(\"Epoch {} synchronization cost {} s\".format(epoch, sync_time))\r\n\r\n if worker_index == 0:\r\n delete_expired_merged_epoch(merged_bucket, admm_epoch)\r\n\r\n #z, u, r, s = update_z_u(w, z, u, rho, num_workers, lam)\r\n #stop = check_stop(ep_abs, ep_rel, r, s, dataset_size, num_features, w, z, u, rho)\r\n #print(\"stop = {}\".format(stop))\r\n\r\n #z = num_workers * rho / (2 * lam + num_workers * rho) * (w + u_mean)\r\n z = update_z(w_mean, u_mean, rho, num_workers, lam)\r\n #print(z)\r\n u = u + model.linear.weight.data.numpy() - z\r\n #print(u)\r\n\r\n # Test the Model\r\n correct = 0\r\n total = 0\r\n test_loss = 0\r\n for items, labels in validation_loader:\r\n items = Variable(items.view(-1, num_features))\r\n labels = Variable(labels)\r\n outputs = model(items)\r\n test_loss += torch.mean(torch.clamp(1 - outputs.t() * labels.float(), min=0))\r\n _, predicted = torch.max(outputs.data, 1)\r\n total += labels.size(0)\r\n correct += (predicted == labels).sum()\r\n\r\n print('Epoch: %d, time = %.4f, accuracy of the model on the %d test samples: %d %%, loss = %f'\r\n % (epoch, time.time() - train_start, len(val_indices), 100 * correct / total, test_loss / total))\r\n\r\n if worker_index == 0:\r\n clear_bucket(merged_bucket)\r\n clear_bucket(tmp_bucket)\r\n\r\n end_time = time.time()\r\n print(\"Elapsed time = {} s\".format(end_time - start_time))\r\n" ]
[ [ "torch.utils.data.DataLoader", "numpy.sum", "numpy.random.shuffle", "numpy.sign", "numpy.ones", "torch.autograd.Variable", "numpy.random.seed", "numpy.floor", "torch.norm", "torch.utils.data.sampler.SubsetRandomSampler", "torch.from_numpy", "numpy.random.rand", "torch.max" ] ]
MattUAV/pandas_market_calendars
[ "397efbf835085152c0eae2de97fe48bac58d8a82" ]
[ "pandas_market_calendars/exchange_calendar_hel.py" ]
[ "from datetime import time\r\n\r\nfrom pandas.tseries.holiday import Holiday, GoodFriday, EasterMonday, AbstractHolidayCalendar\r\nfrom pytz import timezone\r\n\r\nfrom .common_holidays import (\r\n new_years_day,\r\n epiphany,\r\n european_labour_day,\r\n ascension_day,\r\n midsummer_eve,\r\n christmas_eve,\r\n christmas,\r\n boxing_day,\r\n new_years_eve,\r\n)\r\nfrom .market_calendar import MarketCalendar #HolidayCalendar\r\n\r\nNewYearsDay = new_years_day()\r\n\r\nEpiphany = epiphany()\r\n\r\nLabourDay = european_labour_day()\r\n\r\nAscensionDay = ascension_day()\r\n\r\nMidsummerEve = midsummer_eve()\r\n\r\nIndependenceDay = Holiday('Finland Independence Day', month=12, day=6)\r\n\r\nChristmasEve = christmas_eve()\r\nChristmas = christmas()\r\nBoxingDay = boxing_day()\r\n\r\nNewYearsEve = new_years_eve()\r\n\r\n\r\nclass HELExchangeCalendar(MarketCalendar):\r\n \"\"\"\r\n Calendar for the Helsinki Stock Exchange in Finland.\r\n Open Time: 10:00 AM, CET (Eastern European Time)\r\n Close Time: 6:30 PM, CET (Eastern European Time)\r\n Regularly-Observed Holidays:\r\n - New Year's Day\r\n - Epiphany\r\n - Good Friday\r\n - Easter Monday\r\n - Labour Day\r\n - Ascension Day\r\n - Midsummer Eve\r\n - Independence Day\r\n - Christmas Eve\r\n - Christmas Day\r\n - Boxing Day\r\n - New Year's Eve\r\n Early Closes:\r\n - None\r\n \"\"\"\r\n\r\n aliases = ['HEL']\r\n\r\n @property\r\n def name(self):\r\n return \"HEL\"\r\n\r\n @property\r\n def tz(self):\r\n return timezone(\"Europe/Helsinki\")\r\n\r\n @property\r\n def open_time_default(self):\r\n return time(10, 1, tzinfo=self.tz)\r\n\r\n @property\r\n def close_time_default(self):\r\n return time(18, 30, tzinfo=self.tz)\r\n\r\n @property\r\n def regular_holidays(self):\r\n return AbstractHolidayCalendar(rules=[\r\n NewYearsDay,\r\n Epiphany,\r\n GoodFriday,\r\n EasterMonday,\r\n LabourDay,\r\n AscensionDay,\r\n MidsummerEve,\r\n IndependenceDay,\r\n ChristmasEve,\r\n Christmas,\r\n BoxingDay,\r\n NewYearsEve,\r\n ])" ]
[ [ "pandas.tseries.holiday.Holiday", "pandas.tseries.holiday.AbstractHolidayCalendar" ] ]
kensho-technologies/kwnlp-preprocessor
[ "97b13aa109018e38d528e1e9c11f69e0847aa069" ]
[ "kwnlp_preprocessor/task_21p1_gather_wikidata_chunks.py" ]
[ "# Copyright 2021-present Kensho Technologies, LLC.\nimport logging\nimport os\nimport re\n\nimport pandas as pd\n\nfrom kwnlp_preprocessor import argconfig\nfrom kwnlp_preprocessor import utils\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef main(wd_yyyymmdd: str, data_path: str = argconfig.DEFAULT_KWNLP_DATA_PATH) -> None:\n\n for sample in [\n \"p31-claim\",\n \"p279-claim\",\n \"qpq-claim\",\n \"item\",\n \"item-alias\",\n \"item-statements\",\n \"property\",\n \"property-alias\",\n \"skipped-entity\",\n ]:\n\n in_dump_path = os.path.join(\n data_path,\n f\"wikidata-derived-{wd_yyyymmdd}\",\n f\"{sample}-chunks\",\n )\n logger.info(f\"in_dump_path: {in_dump_path}\")\n\n out_dump_path = os.path.join(\n data_path,\n f\"wikidata-derived-{wd_yyyymmdd}\",\n f\"{sample}\",\n )\n out_dump_file = os.path.join(\n out_dump_path,\n f\"kwnlp-wikidata-{wd_yyyymmdd}-{sample}.csv\",\n )\n logger.info(f\"out_dump_path: {out_dump_path}\")\n os.makedirs(out_dump_path, exist_ok=True)\n\n pattern = re.compile(r\"kwnlp-wikidata-\\d{8}-chunk-(\\d{4})-\" + sample + \".csv\")\n all_file_names = [\n match.string for match in utils._get_ordered_files_from_path(in_dump_path, pattern)\n ]\n\n df = pd.DataFrame()\n for file_name in all_file_names:\n file_path = os.path.join(in_dump_path, file_name)\n df1 = pd.read_csv(file_path)\n df = pd.concat([df, df1])\n df.to_csv(out_dump_file, index=False)\n\n\nif __name__ == \"__main__\":\n\n description = \"gather wikidata chunks\"\n arg_names = [\"wd_yyyymmdd\", \"data_path\", \"loglevel\"]\n parser = argconfig.get_argparser(description, arg_names)\n\n args = parser.parse_args()\n logging.basicConfig(level=args.loglevel)\n logger.info(f\"args={args}\")\n\n main(args.wd_yyyymmdd, data_path=args.data_path)\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame", "pandas.concat" ] ]
dc2016bte0006/Latex_OCR
[ "2e919617da8f2f7f3445ed8d1953a5664c1aaba7" ]
[ "eval.py" ]
[ "from dataset.dataset import Im2LatexDataset\r\nimport os\r\nimport sys\r\nimport argparse\r\nimport logging\r\nimport yaml\r\n\r\nimport numpy as np\r\nimport torch\r\nfrom torchtext.data import metrics\r\nfrom munch import Munch\r\nfrom tqdm.auto import tqdm\r\nimport wandb\r\nfrom Levenshtein import distance\r\n\r\nfrom models import get_model, Model\r\nfrom utils import *\r\n\r\n\r\ndef detokenize(tokens, tokenizer):\r\n toks = [tokenizer.convert_ids_to_tokens(tok) for tok in tokens]\r\n for b in range(len(toks)):\r\n for i in reversed(range(len(toks[b]))):\r\n if toks[b][i] is None:\r\n toks[b][i] = ''\r\n toks[b][i] = toks[b][i].replace('Ġ', ' ').strip()\r\n if toks[b][i] in (['[BOS]', '[EOS]', '[PAD]']):\r\n del toks[b][i]\r\n return toks\r\n\r\n\r\[email protected]_grad()\r\ndef evaluate(model: Model, dataset: Im2LatexDataset, args: Munch, num_batches: int = None, name: str = 'test'):\r\n \"\"\"evaluates the model. Returns bleu score on the dataset\r\n\r\n Args:\r\n model (torch.nn.Module): the model\r\n dataset (Im2LatexDataset): test dataset\r\n args (Munch): arguments\r\n num_batches (int): How many batches to evaluate on. Defaults to None (all batches).\r\n name (str, optional): name of the test e.g. val or test for wandb. Defaults to 'test'.\r\n\r\n Returns:\r\n bleu_score: BLEU score of validation set.\r\n \"\"\"\r\n assert len(dataset) > 0\r\n device = args.device\r\n log = {}\r\n bleus, edit_dists = [], []\r\n bleu_score, edit_distance = 0, 1\r\n pbar = tqdm(enumerate(iter(dataset)), total=len(dataset))\r\n for i, (seq, im) in pbar:\r\n if seq is None or im is None:\r\n continue\r\n tgt_seq, tgt_mask = seq['input_ids'].to(device), seq['attention_mask'].bool().to(device)\r\n encoded = model.encoder(im.to(device))\r\n #loss = decoder(tgt_seq, mask=tgt_mask, context=encoded)\r\n dec = model.decoder.generate(torch.LongTensor([args.bos_token]*len(encoded))[:, None].to(device), args.max_seq_len,\r\n eos_token=args.pad_token, context=encoded, temperature=args.get('temperature', .2))\r\n pred = detokenize(dec, dataset.tokenizer)\r\n truth = detokenize(seq['input_ids'], dataset.tokenizer)\r\n bleus.append(metrics.bleu_score(pred, [alternatives(x) for x in truth]))\r\n for predi, truthi in zip(token2str(dec, dataset.tokenizer), token2str(seq['input_ids'], dataset.tokenizer)):\r\n ts = post_process(truthi)\r\n if len(ts) > 0:\r\n edit_dists.append(distance(post_process(predi), ts)/len(ts))\r\n pbar.set_description('BLEU: %.3f, ED: %.2e' % (np.mean(bleus), np.mean(edit_dists)))\r\n if num_batches is not None and i >= num_batches:\r\n break\r\n if len(bleus) > 0:\r\n bleu_score = np.mean(bleus)\r\n log[name+'/bleu'] = bleu_score\r\n if len(edit_dists) > 0:\r\n edit_distance = np.mean(edit_dists)\r\n log[name+'/edit_distance'] = edit_distance\r\n if args.wandb:\r\n # samples\r\n pred = token2str(dec, dataset.tokenizer)\r\n truth = token2str(seq['input_ids'], dataset.tokenizer)\r\n table = wandb.Table(columns=[\"Truth\", \"Prediction\"])\r\n for k in range(min([len(pred), args.test_samples])):\r\n table.add_data(post_process(truth[k]), post_process(pred[k]))\r\n log[name+'/examples'] = table\r\n wandb.log(log)\r\n else:\r\n print('\\n%s\\n%s' % (truth, pred))\r\n print('BLEU: %.2f' % bleu_score)\r\n return bleu_score, edit_distance\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser(description='Test model')\r\n parser.add_argument('--config', default='settings/config.yaml', help='path to yaml config file', type=argparse.FileType('r'))\r\n 
parser.add_argument('-c', '--checkpoint', default='checkpoints/weights.pth', type=str, help='path to model checkpoint')\r\n parser.add_argument('-d', '--data', default='dataset/data/val.pkl', type=str, help='Path to Dataset pkl file')\r\n parser.add_argument('--no-cuda', action='store_true', help='Use CPU')\r\n parser.add_argument('-b', '--batchsize', type=int, default=10, help='Batch size')\r\n parser.add_argument('--debug', action='store_true', help='DEBUG')\r\n parser.add_argument('-t', '--temperature', type=float, default=.333, help='sampling temperature')\r\n parser.add_argument('-n', '--num-batches', type=int, default=None, help='how many batches to evaluate on. Defaults to None (all)')\r\n\r\n parsed_args = parser.parse_args()\r\n with parsed_args.config as f:\r\n params = yaml.load(f, Loader=yaml.FullLoader)\r\n args = parse_args(Munch(params))\r\n args.testbatchsize = parsed_args.batchsize\r\n args.wandb = False\r\n args.temperature = parsed_args.temperature\r\n logging.getLogger().setLevel(logging.DEBUG if parsed_args.debug else logging.WARNING)\r\n seed_everything(args.seed if 'seed' in args else 42)\r\n model = get_model(args)\r\n if parsed_args.checkpoint is not None:\r\n model.load_state_dict(torch.load(parsed_args.checkpoint, args.device))\r\n dataset = Im2LatexDataset().load(parsed_args.data)\r\n valargs = args.copy()\r\n valargs.update(batchsize=args.testbatchsize, keep_smaller_batches=True, test=True)\r\n dataset.update(**valargs)\r\n evaluate(model, dataset, args, num_batches=parsed_args.num_batches)\r\n" ]
[ [ "torch.no_grad", "torch.load", "numpy.mean" ] ]
tilman/compositional_elements
[ "45271196ed01d0515357c7abdf35d6b87f2036d5" ]
[ "evaluation/compare_final2_compoelem.py" ]
[ "# call this script with `python -m evaluation.evaluate_poselines_globalaction`\nimport os\nimport numpy as np\nimport datetime\nfrom tqdm import tqdm\nfrom . import eval_utils\nimport pickle\nimport copyreg\nimport cv2\n\nfrom .compare_deepfeatures import negative_cosine_dist_flatten, eucl_dist_flatten\nfrom .compare_sift import compare_siftBFMatcher1\nfrom .compare_orb import compare_orbBFMatcher1\nfrom .compare_brief import compare_briefBFMatcher1\n\nfrom compoelem.config import config\nfrom compoelem.generate import global_action, pose_abstraction\nfrom compoelem.compare.pose_line import compare_pose_lines_3, compare_pose_lines_3, filter_pose_line_ga_result\nfrom compoelem.compare.normalize import minmax_norm_by_imgrect, minmax_norm_by_bbox, norm_by_global_action\n\n\n\n\n\n# fix cv2 keypoint pickling error\ndef _pickle_keypoint(keypoint): # : cv2.KeyPoint\n return cv2.KeyPoint, (\n keypoint.pt[0],\n keypoint.pt[1],\n keypoint.size,\n keypoint.angle,\n keypoint.response,\n keypoint.octave,\n keypoint.class_id,\n )\n# Apply the bundling to pickle\ncopyreg.pickle(cv2.KeyPoint().__class__, _pickle_keypoint)\n\ndef compare_setupA(data, sort_method, norm_method, glac_fallback, compare_other, additional_feature_weight):\n if norm_method != 'norm_by_global_action':\n raise NotImplementedError(\"only norm_by_global_action is implemented\")\n res_metrics = {}\n precision_curves = {}\n all_retrieval_res = []\n for query_data in tqdm(data, total=len(data)):\n compare_results = []\n #query_pose_lines = minmax_norm_by_imgrect(query_data[\"compoelem\"][pose_lines], query_data[\"width\"], query_data[\"height\"])\n query_pose_lines_seq = norm_by_global_action(query_data[\"compoelem\"][\"pose_lines\"], query_data[\"compoelem\"][\"global_action_lines\"], fallback=glac_fallback)\n for target_data in data:\n if query_data[\"className\"] == target_data[\"className\"] and query_data[\"imgName\"] == target_data[\"imgName\"]:\n continue\n if compare_other == 'vgg19_ncos':\n r_addition = negative_cosine_dist_flatten(query_data[\"imageNet_vgg19_bn_features\"], target_data[\"imageNet_vgg19_bn_features\"])\n elif compare_other == 'resnet50_cos':\n r_addition = negative_cosine_dist_flatten(query_data[\"places365_resnet50_feature_noFC\"], target_data[\"places365_resnet50_feature_noFC\"])\n elif compare_other == 'resnet50_eucl':\n r_addition = eucl_dist_flatten(query_data[\"places365_resnet50_feature_noFC\"], target_data[\"places365_resnet50_feature_noFC\"])\n elif compare_other == 'sift_bfm1':\n r_addition = compare_siftBFMatcher1(query_data[\"sift\"], target_data[\"sift\"])\n elif compare_other == 'orb_bfm1':\n r_addition = compare_orbBFMatcher1(query_data[\"orb\"], target_data[\"orb\"])\n elif compare_other == 'brief_bfm1':\n r_addition = compare_briefBFMatcher1(query_data[\"brief\"], target_data[\"brief\"])\n elif compare_other is None:\n r_addition = 0\n else:\n raise NotImplementedError(\"not implemented compare_other\", compare_other)\n\n #combined_ratio, hit_ratio, neg_mean_distance_hits = compare_pose_lines_3(query_pose_lines, minmax_norm_by_imgrect(target_data[\"compoelem\"][pose_lines], target_data[\"width\"], target_data[\"height\"]))\n target_pose_lines_seq = norm_by_global_action(target_data[\"compoelem\"][\"pose_lines\"], target_data[\"compoelem\"][\"global_action_lines\"], fallback=glac_fallback)\n pair_compare_results = []\n for query_pose_lines in query_pose_lines_seq:\n for target_pose_lines in target_pose_lines_seq:\n combined_ratio, hit_ratio, neg_mean_distance_hits = 
compare_pose_lines_3(query_pose_lines, target_pose_lines)\n pair_compare_results.append((combined_ratio, hit_ratio, neg_mean_distance_hits, target_data))\n combined_ratio, hit_ratio, neg_mean_distance_hits, target_data = filter_pose_line_ga_result(pair_compare_results)\n\n a = additional_feature_weight\n wra = r_addition * (1-a)\n r_combi1 = wra * (1 - combined_ratio * a)\n r_combi2 = wra + (1 - combined_ratio * a)\n r_combi3 = wra * (1 - neg_mean_distance_hits * a)\n r_combi4 = wra + (1 - neg_mean_distance_hits * a)\n\n\n compare_results.append((combined_ratio, hit_ratio, neg_mean_distance_hits, r_combi1, r_combi2, r_combi3, r_combi4, r_addition, target_data))\n compare_results = np.array(compare_results)\n sorted_compare_results = sort_method(compare_results)\n query_label = query_data[\"className\"]\n res_labels = list(map(lambda x: x[\"className\"], sorted_compare_results[:,-1]))\n res_keys = list(map(lambda x: x[\"className\"]+'_'+x[\"imgName\"], sorted_compare_results[:,-1]))\n all_retrieval_res.append(np.array([\n query_data[\"className\"]+'_'+query_data[\"imgName\"],\n query_label,\n res_keys,\n res_labels\n ]))\n metrics = eval_utils.score_retrievals(query_label, res_labels)\n label = metrics[\"label\"]\n if label in precision_curves:\n precision_curves[label].append(metrics[\"precision_at_rank\"])\n else:\n precision_curves[label] = [metrics[\"precision_at_rank\"]]\n for key in metrics.keys():\n if key != \"label\":\n if key not in res_metrics:\n res_metrics[key] = {}\n if label not in res_metrics[key]:\n res_metrics[key][label] = []\n res_metrics[key][label].append(metrics[key])\n return (eval_utils.get_eval_dataframe(res_metrics), precision_curves, np.array(all_retrieval_res))\n\ndef compare_setupB(data, sort_method, norm_method, glac_fallback, compare_other, additional_feature_weight):\n if compare_other is not None:\n raise NotImplementedError(\"compare other not implemented\")\n res_metrics = {}\n precision_curves = {}\n all_retrieval_res = []\n for query_data in tqdm(data, total=len(data)):\n compare_results = []\n if norm_method == 'none':\n query_pose_lines = query_data[\"compoelem\"][\"pose_lines\"]\n elif norm_method == 'minmax_norm_by_imgrect':\n query_pose_lines = minmax_norm_by_imgrect(query_data[\"compoelem\"][\"pose_lines\"], query_data[\"compoelem\"][\"width\"], query_data[\"compoelem\"][\"height\"])\n elif norm_method == 'minmax_norm_by_bbox':\n query_pose_lines = minmax_norm_by_bbox(query_data[\"compoelem\"][\"pose_lines\"])\n else:\n raise NotImplementedError(\"norm_method: {} not implemented\".format(norm_method))\n for target_data in data:\n if query_data[\"className\"] == target_data[\"className\"] and query_data[\"imgName\"] == target_data[\"imgName\"]:\n continue\n if norm_method == 'none':\n target_pose_lines = target_data[\"compoelem\"][\"pose_lines\"]\n elif norm_method == 'minmax_norm_by_imgrect':\n target_pose_lines = minmax_norm_by_imgrect(target_data[\"compoelem\"][\"pose_lines\"], target_data[\"compoelem\"][\"width\"], target_data[\"compoelem\"][\"height\"])\n elif norm_method == 'minmax_norm_by_bbox':\n target_pose_lines = minmax_norm_by_bbox(target_data[\"compoelem\"][\"pose_lines\"])\n else:\n raise NotImplementedError(\"norm_method: {} not implemented\".format(norm_method))\n combined_ratio, hit_ratio, neg_mean_distance_hits = compare_pose_lines_3(query_pose_lines, target_pose_lines)\n compare_results.append((combined_ratio, hit_ratio, neg_mean_distance_hits, target_data))\n compare_results = np.array(compare_results)\n sorted_compare_results = 
sort_method(compare_results)\n query_label = query_data[\"className\"]\n res_labels = list(map(lambda x: x[\"className\"], sorted_compare_results[:,-1]))\n res_keys = list(map(lambda x: x[\"className\"]+'_'+x[\"imgName\"], sorted_compare_results[:,-1]))\n all_retrieval_res.append(np.array([\n query_data[\"className\"]+'_'+query_data[\"imgName\"],\n query_label,\n res_keys,\n res_labels\n ]))\n metrics = eval_utils.score_retrievals(query_label, res_labels)\n label = metrics[\"label\"]\n if label in precision_curves:\n precision_curves[label].append(metrics[\"precision_at_rank\"])\n else:\n precision_curves[label] = [metrics[\"precision_at_rank\"]]\n for key in metrics.keys():\n if key != \"label\":\n if key not in res_metrics:\n res_metrics[key] = {}\n if label not in res_metrics[key]:\n res_metrics[key][label] = []\n res_metrics[key][label].append(metrics[key])\n return (eval_utils.get_eval_dataframe(res_metrics), precision_curves, np.array(all_retrieval_res))\n\n# indices for sorting functions\n# 0: combined_ratio\n# 1: hit_ratio\n# 2: neg_mean_distance_hits\n# 3: r_combi1\n# 4: r_combi2\n# 5: r_combi3\n# 6: r_combi4\n# 7: r_addition\n# 8: target_data\n\ndef cr_desc(compare_results):\n sorted_compare_results = compare_results[np.argsort(compare_results[:,0])][::-1]\n return sorted_compare_results\n\ndef nmd_desc(compare_results):\n sorted_compare_results = compare_results[np.argsort(compare_results[:,2])][::-1]\n return sorted_compare_results\n\ndef hr_nmd_desc(compare_results):\n # hr is primary and therefore second sorting key\n # nmd is seondary and therefore second first key\n sorted_compare_results = compare_results[np.lexsort((compare_results[:,2], compare_results[:,1]))][::-1]\n return sorted_compare_results\n\n\n# additional methods:\ndef hr_additional_desc(compare_results):\n # hr is primary and therefore second sorting key\n # r_addidtion is seondary and therefore second first key\n sorted_compare_results = compare_results[np.lexsort((-compare_results[:,7], compare_results[:,1]))][::-1]\n return sorted_compare_results\n\ndef hr_combi3_desc(compare_results):\n # hr is primary and therefore second sorting key\n # nmd is seondary and therefore second first key\n sorted_compare_results = compare_results[np.lexsort((-compare_results[:,5], compare_results[:,1]))][::-1]\n return sorted_compare_results\n\ndef hr_combi4_desc(compare_results):\n # hr is primary and therefore second sorting key\n # nmd is seondary and therefore second first key\n sorted_compare_results = compare_results[np.lexsort((-compare_results[:,6], compare_results[:,1]))][::-1]\n return sorted_compare_results\n\ndef combi1_asc(compare_results):\n sorted_compare_results = compare_results[np.argsort(compare_results[:,3])]\n return sorted_compare_results\n\ndef combi2_asc(compare_results):\n sorted_compare_results = compare_results[np.argsort(compare_results[:,4])]\n return sorted_compare_results\n\n\nosuname = os.uname().nodename\nprint(\"osuname\", osuname)\nif osuname == 'MBP-von-Tilman' or osuname == 'MacBook-Pro-von-Tilman.local':\n COMPOELEM_ROOT = \"/Users/tilman/Documents/Programme/Python/new_bachelor_thesis/compoelem\"\nelif osuname == 'lme117':\n COMPOELEM_ROOT = \"/home/zi14teho/compositional_elements\"\nelse:\n COMPOELEM_ROOT = os.getenv('COMPOELEM_ROOT')\nDATASTORE_NAME = \"combined_datastore_ceb_dataset\"\nDATASTORE_FILE = COMPOELEM_ROOT+\"/final_evaluation/\"+DATASTORE_NAME+\".pkl\"\nEVAL_RESULTS_FILE_DIR = COMPOELEM_ROOT+\"/final_evaluation/final2pkl/\"\nDATASTORE_NAME = 
\"combined_datastore_ceb_dataset\"\ndatastore = pickle.load(open(DATASTORE_FILE, \"rb\"))\ndatastore_name = DATASTORE_NAME\n\n# def eval_single_combination(\n# norm_method,\n# sort_method_name,\n \n# correction_angle,\n# cone_opening_angle,\n# cone_scale_factor,\n# cone_base_scale_factor,\n# filter_threshold,\n\n# poseline_fallback,\n# bisection_fallback,\n# glac_fallback,\n# ):\n\n# print({\n# \"norm_method\":norm_method,\n# \"sort_method_name\":sort_method_name,\n# \"correction_angle\":correction_angle,\n# \"cone_opening_angle\":cone_opening_angle,\n# \"cone_scale_factor\":cone_scale_factor,\n# \"cone_base_scale_factor\":cone_base_scale_factor,\n# \"filter_threshold\":filter_threshold,\n# \"poseline_fallback\":poseline_fallback,\n# \"bisection_fallback\":bisection_fallback,\n# \"glac_fallback\":glac_fallback,\n# })\ndef eval_single_combination(arg_obj):\n print(arg_obj)\n experiment_name = arg_obj[\"experiment_name\"]\n norm_method = arg_obj[\"norm_method\"]\n sort_method_name = arg_obj[\"sort_method_name\"]\n correction_angle = arg_obj[\"correction_angle\"]\n cone_opening_angle = arg_obj[\"cone_opening_angle\"]\n cone_scale_factor = arg_obj[\"cone_scale_factor\"]\n cone_base_scale_factor = arg_obj[\"cone_base_scale_factor\"]\n filter_threshold = arg_obj[\"filter_threshold\"]\n poseline_fallback = arg_obj[\"poseline_fallback\"]\n bisection_fallback = arg_obj[\"bisection_fallback\"]\n glac_fallback = arg_obj[\"glac_fallback\"]\n additional_feature_weight = arg_obj[\"additional_feature_weight\"] if \"additional_feature_weight\" in arg_obj else 0.5\n compare_other = arg_obj[\"compare_other\"] if \"compare_other\" in arg_obj else None\n\n setup = compare_setupA if norm_method == 'norm_by_global_action' else compare_setupB\n if sort_method_name == 'cr_desc':\n sort_method = cr_desc\n elif sort_method_name == 'nmd_desc':\n sort_method = nmd_desc\n elif sort_method_name == 'hr_nmd_desc':\n sort_method = hr_nmd_desc\n elif sort_method_name == 'hr_additional_desc':\n sort_method = hr_additional_desc\n elif sort_method_name == 'hr_combi3_desc':\n sort_method = hr_combi3_desc\n elif sort_method_name == 'hr_combi4_desc':\n sort_method = hr_combi4_desc\n elif sort_method_name == 'combi1_asc':\n sort_method = combi1_asc\n elif sort_method_name == 'combi2_asc':\n sort_method = combi2_asc\n else:\n raise NotImplementedError(\"sort_method: {} not implemented\".format(sort_method_name))\n\n config[\"bisection\"][\"correction_angle\"] = correction_angle\n config[\"bisection\"][\"cone_opening_angle\"] = cone_opening_angle\n config[\"bisection\"][\"cone_scale_factor\"] = cone_scale_factor\n config[\"bisection\"][\"cone_base_scale_factor\"] = cone_base_scale_factor\n config[\"compare\"][\"filter_threshold\"] = filter_threshold\n\n new_datastore_values = []\n for key in datastore.keys():\n poses = datastore[key][\"compoelem\"][\"poses\"]\n datastore[key][\"compoelem\"][\"global_action_lines\"] = global_action.get_global_action_lines(poses, bisection_fallback)\n datastore[key][\"compoelem\"][\"pose_lines\"] = pose_abstraction.get_pose_lines(poses, poseline_fallback)\n new_datastore_values.append(datastore[key])\n\n start_time = datetime.datetime.now()\n eval_dataframe, precision_curves, all_retrieval_res = setup(new_datastore_values, sort_method, norm_method, glac_fallback, compare_other, additional_feature_weight)\n norm_alias = {\n \"minmax_norm_by_imgrect\":\"Size\",\n \"minmax_norm_by_bbox\":\"Bbox\",\n \"norm_by_global_action\":\"Glac\",\n \"none\":\"None\",\n }\n filename = 
\"final2_time{}_norm{}_{}_ca{}_co{}_cs{}_cbs{}_th{}_fbPl{}_fbBis{}_fbGa{}_other{}_aw{}.pkl\".format(\n start_time.strftime(\"%d%m%y%H%M%S\"),\n\n norm_alias[norm_method],\n sort_method.__name__,\n\n correction_angle,\n cone_opening_angle,\n cone_scale_factor,\n cone_base_scale_factor,\n filter_threshold,\n\n poseline_fallback,\n bisection_fallback,\n glac_fallback,\n\n compare_other,\n additional_feature_weight,\n )\n print(\"filename\", filename, \"p@1\", eval_dataframe[\"p@1\"][\"total (mean)\"])\n res_summary = {\n \"experiment_name\": experiment_name,\n \"experiment_id\": filename,\n \"filename\": filename,\n \"datetime\": start_time,\n \"setup\": setup.__name__,\n \"eval_time_s\": (datetime.datetime.now() - start_time).seconds,\n \"datastore_name\": datastore_name,\n\n \"eval_dataframe\": eval_dataframe,\n \"precision_curves\": precision_curves,\n \"all_retrieval_res\": all_retrieval_res,\n \n \"config\": config,\n\n \"norm_method\": norm_method,\n \"compare_method\": \"compare_pose_lines_3\",\n \"sort_method\": sort_method.__name__,\n\n \"compare_other\": compare_other,\n\n \"correction_angle\": correction_angle,\n \"cone_opening_angle\": cone_opening_angle,\n \"cone_scale_factor\": cone_scale_factor,\n \"filter_threshold\": filter_threshold,\n\n \"poseline_fallback\": poseline_fallback,\n \"bisection_fallback\": bisection_fallback,\n \"glac_fallback\": glac_fallback,\n }\n pickle.dump(res_summary, open(EVAL_RESULTS_FILE_DIR+filename, \"wb\"))\n" ]
[ [ "numpy.array", "numpy.lexsort", "numpy.argsort" ] ]
jae1001/FibreCOP
[ "328cf5feb1c8447a20d52b23035098558c3a6e8b" ]
[ "ODFnOPsimulator.py" ]
[ "'''This program calculates the Chebyshev/Herman orientation parameters\r\nfrom simulated intensity distribution data.\r\n\r\nCode developed by Dr. A. Kaniyoor,\r\nMacromolecular Materials Laboratory,University of Cambridge, Cambridge, UK\r\n2020-2021\r\n\r\nReference Publication: Quantifying Alignment in Carbon Nanotube Yarns and Similar 2D Anisotropic Systems\r\nA. Kaniyoor, T.S. Gspann, J. E. Mizen, J.A. Elliott.\r\nTo be submitted\r\n\r\nThere are two main programs here. Program 1 (True by default) generates orientation distribution functions with varying widths \r\nand calculates orientation parameters from the ODFs. To view the ODFs, please enable command - plot ODF=True (False by default)\r\nProgram 2 (off/False by default) generates ODFs with secondary peaks whose height can be adjusted in the code, and calculates orientation parameters.\r\n\r\n'''\r\n\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport scipy.special as sp\r\nfrom matplotlib import rcParams\r\nrcParams['font.family']='Arial'\r\nrcParams['legend.fontsize']=8 # 11 for 0.5 column figure\r\nrcParams['axes.labelsize']=12 #16 for 0.5 column figure,12 for 1 column figure\r\nrcParams['xtick.labelsize']=10 # 14 for 0.5 column figure\r\nrcParams['ytick.labelsize']=10\r\nrcParams['lines.markersize']=3\r\nrcParams['lines.linewidth']=1\r\nrcParams['lines.antialiased']=True\r\nrcParams['mathtext.default']='regular'\r\nrcParams['figure.figsize']=3.5,3.2 #3.5,2.6 is 1 column figure\r\nrcParams['figure.dpi']=150 # change dpi=300 for publication quality images\r\n\r\n\r\n\r\n# defining the functional forms\r\ndef gaussian(x, mu, hwidth):\r\n sigma=hwidth/(np.sqrt(2*np.log(2)))\r\n return (1/(sigma*np.sqrt(2*np.pi)))*np.exp(-(np.abs(x - mu)/(np.sqrt(2)*sigma))**2)\r\n\r\ndef lorentzian(x,mu,hwidth):\r\n return (1/(np.pi*hwidth))*(hwidth**2)/(((x-mu)**2)+hwidth**2)\r\n\r\ndef gnd(x,mu,hwidth,beta):\r\n alpha=hwidth/((np.log(2))**(1/beta))\r\n return (beta/(2*alpha*sp.gamma(1/beta)))*np.exp(-(np.abs(x - mu)/alpha)**beta)\r\n\r\n\r\n\r\n# generating angle data\r\na=5000\r\nx=[]\r\nx = np.linspace(0, 360, a)\r\ncosSqX=np.power(np.cos(x*np.pi/180),2)\r\nsinX=np.sin(x*np.pi/180)\r\ncosFtX=np.power(np.cos(x*np.pi/180),4)\r\n\r\n\r\n\r\n# Choose the program to run: program 1 - primary peaks only ; prgoram 2 - with secondary peaks\r\n\r\nprogram1 = True\r\nprogram2 = False\r\n\r\n\r\n\r\n\r\n\r\n###### PROGRAM 1 for generating ODFs with primary peaks #####\r\n\r\n\r\nif program1:\r\n \r\n # setting peak parameters\r\n c=[0.01,0.6,2.9,5.7,9.5,11.5,14.3,19.1,28.6,38.2,52.1,57.3,71.6,81.8,95.5,114.6,120,135,150,165,180]\r\n #c=[5]\r\n beta=1.5\r\n mu = [0,180,360]\r\n mu1=[-90,90,270,450]\r\n\r\n \r\n plotODF = False # True only if you want to view ODFs, usually when c is a fixed value\r\n\r\n \r\n # generating ODFs oriented along reference direction\r\n gaussianOP=np.empty_like(x)\r\n lorentzianOP=np.empty_like(x)\r\n gndOP=np.empty_like(x)\r\n for hwidth in c:\r\n g=gaussian(x, mu[0], hwidth)+gaussian(x, mu[1],hwidth)+gaussian(x, mu[2],hwidth)\r\n l=lorentzian(x,mu[0],hwidth)+lorentzian(x,mu[1],hwidth)+lorentzian(x,mu[2],hwidth)\r\n gn=gnd(x,mu[0],hwidth,beta)+gnd(x,mu[1],hwidth,beta)+gnd(x,mu[2],hwidth,beta)\r\n gaussianOP = np.vstack((gaussianOP,g))\r\n lorentzianOP= np.vstack((lorentzianOP,l))\r\n gndOP=np.vstack((gndOP,gn))\r\n\r\n #output= np.array([x,l])\r\n #np.savetxt('ModelData.txt',output.T)\r\n\r\n # generating ODFs oriented perpendicular to reference direction \r\n gaussianOP1=np.empty_like(x)\r\n 
lorentzianOP1=np.empty_like(x)\r\n gndOP1=np.empty_like(x)\r\n\r\n for hwidth in c:\r\n g1=gaussian(x, mu1[0],hwidth)+gaussian(x, mu1[1],hwidth)+gaussian(x, mu1[2],hwidth)+gaussian(x, mu1[3],hwidth)\r\n l1=lorentzian(x,mu1[0],hwidth)+lorentzian(x,mu1[1],hwidth)+lorentzian(x,mu1[2],hwidth)+lorentzian(x,mu1[3],hwidth)\r\n gn1=gnd(x, mu1[0], hwidth,beta)+gnd(x, mu1[1], hwidth,beta)++gnd(x, mu1[2], hwidth,beta)++gnd(x, mu1[3], hwidth,beta)\r\n gaussianOP1 = np.vstack((gaussianOP1,g1))\r\n lorentzianOP1= np.vstack((lorentzianOP1,l1))\r\n gndOP1=np.vstack((gndOP1,gn1))\r\n \r\n\r\n # plotting ODF\r\n if plotODF:\r\n plt.figure(figsize=(3.5,3))\r\n plt.plot(x,lorentzianOP[1::,:].T,'b-',label='LD')\r\n plt.plot(x,gndOP[1::,:].T,'g-.',label='GND')\r\n plt.plot(x,gaussianOP[1::,:].T,'r--',label='GD')\r\n plt.legend(bbox_to_anchor=(0,1.1,1,0), loc=\"lower left\",mode=\"expand\", ncol=3)\r\n plt.xlabel('Angle (\\xb0)')\r\n plt.ylabel('Intensity (a.u.)')\r\n plt.xticks([0,90,180,270,360])\r\n plt.locator_params('y',nbins=6)\r\n plt.ticklabel_format(axis='y',style='sci',scilimits=(0,0))\r\n plt.tight_layout()\r\n plt.minorticks_on()\r\n plt.show()\r\n plt.close()\r\n\r\n plt.figure(figsize=(3.5,3))\r\n plt.plot(x,lorentzianOP1[1::,:].T,'b-',label='LD')\r\n plt.plot(x,gndOP1[1::,:].T,'g-.',label='GND')\r\n plt.plot(x,gaussianOP1[1::,:].T,'r--',label='GD')\r\n plt.legend(bbox_to_anchor=(0,1.1,1,0), loc=\"lower left\",mode=\"expand\", ncol=3)\r\n plt.xlabel('Angle (\\xb0)')\r\n plt.ylabel('Intensity (a.u.)')\r\n plt.xticks([0,90,180,270,360])\r\n plt.locator_params('y',nbins=6)\r\n plt.ticklabel_format(axis='y',style='sci',scilimits=(0,0))\r\n plt.tight_layout()\r\n plt.show()\r\n plt.close()\r\n\r\n # calculating orientation parameters\r\n P2g=[]\r\n P2l=[]\r\n T2g=[]\r\n T2l=[]\r\n P2gn=[]\r\n T2gn=[]\r\n T4l=[]\r\n\r\n P2g1=[]\r\n P2l1=[]\r\n T2g1=[]\r\n T2l1=[]\r\n P2gn1=[]\r\n T2gn1=[]\r\n T4l1=[]\r\n\r\n for i in range(1,np.shape(gaussianOP)[0]):\r\n cosSq3DG=np.sum(gaussianOP[i,0:int(a/2)]*cosSqX[0:int(a/2)]*sinX[0:int(a/2)])/np.sum(gaussianOP[i,0:int(a/2)]*sinX[0:int(a/2)])\r\n P2g.append(np.round(1.5*cosSq3DG-0.5,3))\r\n cosSq3DL=np.sum(lorentzianOP[i,0:int(a/2)]*cosSqX[0:int(a/2)]*sinX[0:int(a/2)])/np.sum(lorentzianOP[i,0:int(a/2)]*sinX[0:int(a/2)])\r\n P2l.append(np.round(1.5*cosSq3DL-0.5,3))\r\n cosSq3DGN=np.sum(gndOP[i,0:int(a/2)]*cosSqX[0:int(a/2)]*sinX[0:int(a/2)])/np.sum(gndOP[i,0:int(a/2)]*sinX[0:int(a/2)])\r\n P2gn.append(np.round(1.5*cosSq3DGN-0.5,3))\r\n cosSq2DG=np.sum(gaussianOP[i,0:int(a/2)]*cosSqX[0:int(a/2)])/np.sum(gaussianOP[i,0:int(a/2)])\r\n T2g.append(np.round(2*cosSq2DG-1,3))\r\n cosSq2DL=np.sum(lorentzianOP[i,0:int(a/2)]*cosSqX[0:int(a/2)])/np.sum(lorentzianOP[i,0:int(a/2)])\r\n T2l.append(np.round(2*cosSq2DL-1,3))\r\n cosSq2DGN=np.sum(gndOP[i,0:int(a/2)]*cosSqX[0:int(a/2)])/np.sum(gndOP[i,0:int(a/2)])\r\n T2gn.append(np.round(2*cosSq2DGN-1,3))\r\n \r\n \r\n for i in range(1,np.shape(gaussianOP)[0]):\r\n cosSq3DG1=np.sum(gaussianOP1[i,0:int(a/2)]*cosSqX[0:int(a/2)]*sinX[0:int(a/2)])/np.sum(gaussianOP1[i,0:int(a/2)]*sinX[0:int(a/2)])\r\n P2g1.append(np.round(1.5*cosSq3DG1-0.5,3))\r\n cosSq3DL1=np.sum(lorentzianOP1[i,0:int(a/2)]*cosSqX[0:int(a/2)]*sinX[0:int(a/2)])/np.sum(lorentzianOP1[i,0:int(a/2)]*sinX[0:int(a/2)])\r\n P2l1.append(np.round(1.5*cosSq3DL1-0.5,3))\r\n cosSq3DGN1=np.sum(gndOP1[i,0:int(a/2)]*cosSqX[0:int(a/2)]*sinX[0:int(a/2)])/np.sum(gndOP1[i,0:int(a/2)]*sinX[0:int(a/2)])\r\n P2gn1.append(np.round(1.5*cosSq3DGN1-0.5,3))\r\n 
cosSq2DG1=np.sum(gaussianOP1[i,0:int(a/2)]*cosSqX[0:int(a/2)])/np.sum(gaussianOP1[i,0:int(a/2)])\r\n T2g1.append(np.round(2*cosSq2DG1-1,3))\r\n cosSq2DL1=np.sum(lorentzianOP1[i,0:int(a/2)]*cosSqX[0:int(a/2)])/np.sum(lorentzianOP1[i,0:int(a/2)])\r\n T2l1.append(np.round(2*cosSq2DL1-1,3))\r\n cosSq2DGN1=np.sum(gndOP1[i,0:int(a/2)]*cosSqX[0:int(a/2)])/np.sum(gndOP1[i,0:int(a/2)])\r\n T2gn1.append(np.round(2*cosSq2DGN1-1,3))\r\n \r\n \r\n # Plotting orientation parameters \r\n fw=2*np.asarray(c) # converting widths to full widths (FWHM)\r\n\r\n plt.plot(fw,P2l,'b-x', label='<P$_2$>$_{LD}$')\r\n plt.plot(fw,P2gn,'g--x', label='<P$_2$>$_{GND}$')\r\n plt.plot(fw,P2g,'r:x', label = '<P$_2$>$_{GD}$')\r\n plt.plot(fw,P2l1,'b-x')\r\n plt.plot(fw,P2gn1,'g--x')\r\n plt.plot(fw,P2g1,'r:x')\r\n\r\n\r\n plt.plot(fw,T2l,'b-o', label='<T$_2$>$_{LD}$')\r\n plt.plot(fw,T2gn,'g--o', label='<T$_2$>$_{GND}$')\r\n plt.plot(fw,T2g,'r:o', label='<T$_2$>$_{GD}$')\r\n plt.plot(fw,T2l1,'b-o')\r\n plt.plot(fw,T2gn1,'g--o')\r\n plt.plot(fw,T2g1,'r:o')\r\n\r\n plt.legend(bbox_to_anchor=(0,1,1,0), loc=\"lower left\",mode=\"expand\", ncol=3)\r\n #plt.legend()\r\n plt.xlabel('FWHM (\\xb0)')\r\n plt.ylabel('Orientation Parameter')\r\n #plt.title('OP vs FWHM')\r\n plt.minorticks_on()\r\n plt.tight_layout()\r\n plt.show()\r\n plt.close()\r\n\r\n print('\\n <T2> vs. FWHM')\r\n print('+LD\\n',T2l,'\\n-LD\\n',T2l1,'\\n+GD\\n',T2g,'\\n-GD\\n',T2g1,'\\n+GND\\n',T2gn,'\\n-GND\\n',T2gn1)\r\n print('\\n <P2> vs. FWHM')\r\n print('+LD\\n',P2l,'\\n-LD\\n',P2l1,'\\n+GD\\n',P2g,'\\n-GD\\n',P2g1,'\\n+GND\\n',P2gn,'\\n-GND\\n',P2gn1)\r\n\r\n\r\n\r\n\r\n '''\r\n # Plotting T4 values\r\n\r\n for i in range(1,np.shape(gaussianOP)[0]):\r\n cosFt2DL=np.sum(lorentzianOP[i,0:int(a/2)]*cosFtX[0:int(a/2)])/np.sum(lorentzianOP[i,0:int(a/2)])\r\n T4l.append(np.round(8*cosFt2DL-8*cosSq2DL+1,3))\r\n cosFt2DL1=np.sum(lorentzianOP1[i,0:int(a/2)]*cosFtX[0:int(a/2)])/np.sum(lorentzianOP1[i,0:int(a/2)])\r\n T4l1.append(np.round(8*cosFt2DL1-8*cosSq2DL1+1,3))\r\n \r\n plt.figure()\r\n plt.plot(fw,T4l,'b-o', label='<T$_4$>$_{LD}$')\r\n plt.plot(fw,T4l1,'b-o')\r\n plt.legend()\r\n plt.xlabel('FWHM (\\xb0)')\r\n plt.ylabel('Orientation Parameter')\r\n plt.minorticks_on()\r\n plt.tight_layout()\r\n plt.show()\r\n plt.close()\r\n print('\\n',T4l,T4l1)\r\n '''\r\n \r\n\r\n\r\n \r\n # to calculate Orientation parameter for GND function with different shape factors\r\n b=np.linspace(1,2,5)\r\n for beta in b:\r\n gaussianOP=np.empty_like(x)\r\n lorentzianOP=np.empty_like(x)\r\n gndOP=np.empty_like(x)\r\n \r\n for hwidth in c:\r\n g=gaussian(x, mu[0], hwidth)+gaussian(x, mu[1],hwidth)+gaussian(x, mu[2],hwidth)\r\n l=lorentzian(x,mu[0],hwidth)+lorentzian(x,mu[1],hwidth)+lorentzian(x,mu[2],hwidth)\r\n gn=gnd(x,mu[0],hwidth,beta)+gnd(x,mu[1],hwidth,beta)+gnd(x,mu[2],hwidth,beta)\r\n gaussianOP = np.vstack((gaussianOP,g))\r\n lorentzianOP= np.vstack((lorentzianOP,l))\r\n gndOP=np.vstack((gndOP,gn))\r\n \r\n \r\n P2g=[]\r\n P2l=[]\r\n T2g=[]\r\n T2l=[]\r\n P2gn=[]\r\n T2gn=[]\r\n \r\n for i in range(1,np.shape(gaussianOP)[0]):\r\n cosSq3DG=np.sum(gaussianOP[i,0:int(a/2)]*cosSqX[0:int(a/2)]*sinX[0:int(a/2)])/np.sum(gaussianOP[i,0:int(a/2)]*sinX[0:int(a/2)])\r\n P2g.append(np.round(1.5*cosSq3DG-0.5,3))\r\n cosSq3DL=np.sum(lorentzianOP[i,0:int(a/2)]*cosSqX[0:int(a/2)]*sinX[0:int(a/2)])/np.sum(lorentzianOP[i,0:int(a/2)]*sinX[0:int(a/2)])\r\n P2l.append(np.round(1.5*cosSq3DL-0.5,3))\r\n 
cosSq3DGN=np.sum(gndOP[i,0:int(a/2)]*cosSqX[0:int(a/2)]*sinX[0:int(a/2)])/np.sum(gndOP[i,0:int(a/2)]*sinX[0:int(a/2)])\r\n P2gn.append(np.round(1.5*cosSq3DGN-0.5,3))\r\n cosSq2DG=np.sum(gaussianOP[i,0:int(a/2)]*cosSqX[0:int(a/2)])/np.sum(gaussianOP[i,0:int(a/2)])\r\n T2g.append(np.round(2*cosSq2DG-1,3))\r\n cosSq2DL=np.sum(lorentzianOP[i,0:int(a/2)]*cosSqX[0:int(a/2)])/np.sum(lorentzianOP[i,0:int(a/2)])\r\n T2l.append(np.round(2*cosSq2DL-1,3))\r\n cosSq2DGN=np.sum(gndOP[i,0:int(a/2)]*cosSqX[0:int(a/2)])/np.sum(gndOP[i,0:int(a/2)])\r\n T2gn.append(np.round(2*cosSq2DGN-1,3))\r\n \r\n print('\\n <T2> vs. FWHM for GNDs with beta=', beta)\r\n print(T2gn)\r\n plt.plot(fw,T2gn,'--o',label=(r'$\\beta$='+str(beta)))\r\n \r\n #print('\\n <P2> vs. FWHM for GNDs with beta=', beta)\r\n #print(P2gn)\r\n #plt.plot(fw,P2gn,'--o',label=(r'$\\beta$='+str(beta)))\r\n\r\n \r\n \r\n plotNegSide=False\r\n if plotNegSide:\r\n gaussianOP1=np.empty_like(x)\r\n lorentzianOP1=np.empty_like(x)\r\n gndOP1=np.empty_like(x)\r\n \r\n for hwidth in c:\r\n g1=gaussian(x, mu1[0],hwidth)+gaussian(x, mu1[1],hwidth)+gaussian(x, mu1[2],hwidth)+gaussian(x, mu1[3],hwidth)\r\n l1=lorentzian(x,mu1[0],hwidth)+lorentzian(x,mu1[1],hwidth)+lorentzian(x,mu1[2],hwidth)+lorentzian(x,mu1[3],hwidth)\r\n gn1=gnd(x, mu1[0], hwidth,beta)+gnd(x, mu1[1], hwidth,beta)++gnd(x, mu1[2], hwidth,beta)++gnd(x, mu1[3], hwidth,beta)\r\n gaussianOP1 = np.vstack((gaussianOP1,g1))\r\n lorentzianOP1= np.vstack((lorentzianOP1,l1))\r\n gndOP1=np.vstack((gndOP1,gn1))\r\n \r\n P2g1=[]\r\n P2l1=[]\r\n T2g1=[]\r\n T2l1=[]\r\n P2gn1=[]\r\n T2gn1=[]\r\n for i in range(1,np.shape(gaussianOP)[0]):\r\n cosSq3DG1=np.sum(gaussianOP1[i,0:int(a/2)]*cosSqX[0:int(a/2)]*sinX[0:int(a/2)])/np.sum(gaussianOP1[i,0:int(a/2)]*sinX[0:int(a/2)])\r\n P2g1.append(np.round(1.5*cosSq3DG1-0.5,3))\r\n cosSq3DL1=np.sum(lorentzianOP1[i,0:int(a/2)]*cosSqX[0:int(a/2)]*sinX[0:int(a/2)])/np.sum(lorentzianOP1[i,0:int(a/2)]*sinX[0:int(a/2)])\r\n P2l1.append(np.round(1.5*cosSq3DL1-0.5,3))\r\n cosSq3DGN1=np.sum(gndOP1[i,0:int(a/2)]*cosSqX[0:int(a/2)]*sinX[0:int(a/2)])/np.sum(gndOP1[i,0:int(a/2)]*sinX[0:int(a/2)])\r\n P2gn1.append(np.round(1.5*cosSq3DGN1-0.5,3))\r\n cosSq2DG1=np.sum(gaussianOP1[i,0:int(a/2)]*cosSqX[0:int(a/2)])/np.sum(gaussianOP1[i,0:int(a/2)])\r\n T2g1.append(np.round(2*cosSq2DG1-1,3))\r\n cosSq2DL1=np.sum(lorentzianOP1[i,0:int(a/2)]*cosSqX[0:int(a/2)])/np.sum(lorentzianOP1[i,0:int(a/2)])\r\n T2l1.append(np.round(2*cosSq2DL1-1,3))\r\n cosSq2DGN1=np.sum(gndOP1[i,0:int(a/2)]*cosSqX[0:int(a/2)])/np.sum(gndOP1[i,0:int(a/2)])\r\n T2gn1.append(np.round(2*cosSq2DGN1-1,3))\r\n \r\n \r\n print('\\n <T2> vs. FWHM for GNDs with beta=', beta)\r\n print(T2gn1)\r\n plt.plot(fw,T2gn1,'--o',label=(r'$ \\beta$='+str(beta)))\r\n #print('\\n <P2> vs. FWHM for GNDs with beta=', beta)\r\n #print(P2gn1)\r\n #plt.plot(fw,P2gn1,'--x',label=(r'$ \\beta$='+str(beta)))\r\n \r\n \r\n \r\n \r\n plt.plot(fw,T2l,'b-x',label='LD')\r\n plt.plot(fw,T2g,'k-x',label='GD')\r\n #plt.plot(fw,P2l,'b-x',label='LD')\r\n #plt.plot(fw,P2g,'k-x',label='GD')\r\n plt.legend(bbox_to_anchor=(0,1,1,0), loc=\"lower left\",mode=\"expand\", ncol=4)\r\n plt.xlabel('FWHM (\\xb0)')\r\n plt.ylabel('Orientation Parameter')\r\n plt.tight_layout()\r\n plt.minorticks_on()\r\n plt.show()\r\n plt.close()\r\n \r\n print('\\n <T2> vs. FWHM for LD')\r\n print(T2l)\r\n print('\\n <T2> vs. 
FWHM for GD')\r\n print(T2g)\r\n #print('P2 Lorentzian =',P2l)\r\n #print('P2 Gaussian =', P2g)\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n###### PROGRAM 2 for generating ODFs with secondary orientation peaks #####\r\nif program2:\r\n\r\n # Setting peak parameters\r\n\r\n c=[0.1,0.6,2.9,5.7,9.5,11.5,14.3,19.1,28.6,38.2,52.1,57.3,71.6,81.8,95.5,114.6,120,135,150,165,180]\r\n h=[0,0.1,0.25,0.5,0.75,1] # relative height of secondary peak\r\n #c=[5]\r\n #h=[0.1,0.75] # do not forget to change labels in legend of ODF plots if you change these values\r\n beta=1.5\r\n mu2 = [0,90,180,270,360] # setting peak positions\r\n\r\n plotODF=False\r\n\r\n # defining ODFs\r\n gaussianOP=np.empty_like(x)\r\n lorentzianOP=np.empty_like(x)\r\n gndOP=np.empty_like(x)\r\n\r\n for height in h:\r\n for hwidth in c:\r\n g=gaussian(x, mu2[0], hwidth)+height*gaussian(x, mu2[1],hwidth)+gaussian(x, mu2[2],hwidth)+height*gaussian(x, mu2[3],hwidth)+gaussian(x, mu2[4],hwidth)\r\n l=lorentzian(x,mu2[0],hwidth)+height*lorentzian(x,mu2[1],hwidth)+lorentzian(x,mu2[2],hwidth)+height*lorentzian(x,mu2[3],hwidth)+lorentzian(x,mu2[4],hwidth)\r\n gn=gnd(x,mu2[0],hwidth,beta)+height*gnd(x,mu2[1],hwidth,beta)+gnd(x,mu2[2],hwidth,beta)+height*gnd(x,mu2[3],hwidth,beta)+gnd(x,mu2[4],hwidth,beta)\r\n gaussianOP = np.vstack((gaussianOP,g))\r\n lorentzianOP= np.vstack((lorentzianOP,l))\r\n gndOP=np.vstack((gndOP,gn))\r\n \r\n\r\n #output= np.array([x,l])\r\n #np.savetxt('ModelData.txt',output.T)\r\n\r\n \r\n if plotODF:\r\n plt.figure(dpi=150,figsize=(3.5,2.6))\r\n plt.plot(x,lorentzianOP[1::,:].T)\r\n #plt.plot(x,gndOP[1::,:].T,label='GND')\r\n #plt.plot(x,gaussianOP[1::,:].T,label='GD')\r\n plt.xlabel('Angle (\\xb0)')\r\n plt.ylabel('Intensity (a.u.)')\r\n plt.xticks([0,90,180,270,360])\r\n plt.locator_params('y',nbins=6)\r\n plt.ticklabel_format(axis='y',style='sci',scilimits=(0,0))\r\n plt.tight_layout()\r\n plt.minorticks_on()\r\n plt.legend(('A$_2$/A$_1$=0.1','A$_2$/A$_1$=0.75'))\r\n plt.show() \r\n plt.close()\r\n\r\n P2g=[]\r\n P2l=[]\r\n T2g=[]\r\n T2l=[]\r\n P2gn=[]\r\n T2gn=[]\r\n T4l=[]\r\n\r\n \r\n for i in range(1,np.shape(gaussianOP)[0]):\r\n cosSq3DG=np.sum(gaussianOP[i,0:int(a/2)]*cosSqX[0:int(a/2)]*sinX[0:int(a/2)])/np.sum(gaussianOP[i,0:int(a/2)]*sinX[0:int(a/2)])\r\n P2g.append(np.round(1.5*cosSq3DG-0.5,3))\r\n cosSq3DL=np.sum(lorentzianOP[i,0:int(a/2)]*cosSqX[0:int(a/2)]*sinX[0:int(a/2)])/np.sum(lorentzianOP[i,0:int(a/2)]*sinX[0:int(a/2)])\r\n P2l.append(np.round(1.5*cosSq3DL-0.5,3))\r\n cosSq3DGN=np.sum(gndOP[i,0:int(a/2)]*cosSqX[0:int(a/2)]*sinX[0:int(a/2)])/np.sum(gndOP[i,0:int(a/2)]*sinX[0:int(a/2)])\r\n P2gn.append(np.round(1.5*cosSq3DGN-0.5,3))\r\n cosSq2DG=np.sum(gaussianOP[i,0:int(a/2)]*cosSqX[0:int(a/2)])/np.sum(gaussianOP[i,0:int(a/2)])\r\n T2g.append(np.round(2*cosSq2DG-1,3))\r\n cosSq2DL=np.sum(lorentzianOP[i,0:int(a/2)]*cosSqX[0:int(a/2)])/np.sum(lorentzianOP[i,0:int(a/2)])\r\n T2l.append(np.round(2*cosSq2DL-1,3))\r\n cosSq2DGN=np.sum(gndOP[i,0:int(a/2)]*cosSqX[0:int(a/2)])/np.sum(gndOP[i,0:int(a/2)])\r\n T2gn.append(np.round(2*cosSq2DGN-1,3))\r\n cosFt2DL=np.sum(lorentzianOP[i,0:int(a/2)]*cosFtX[0:int(a/2)])/np.sum(lorentzianOP[i,0:int(a/2)])\r\n T4l.append(np.round(8*cosFt2DL-8*cosSq2DL+1,3))\r\n \r\n \r\n \r\n fw=2*np.asarray(c)\r\n print('FWHM=',fw)\r\n print('\\n <T2> vs. FWHM for A2/A1=',h)\r\n print('LD\\n',T2l,'\\nGD\\n',T2g,'\\nGND\\n',T2gn)\r\n print('\\n <T4> vs. FWHM for A2/A1=',h)\r\n print('LD\\n',T4l)\r\n print('\\n <P2> vs. 
FWHM for A2/A1=',h)\r\n print('LD\\n',P2l,'\\nGD\\n',P2g,'\\nGND\\n',P2gn)\r\n\r\n\r\n \r\n #Plotting values\r\n \r\n index=0\r\n plt.figure(figsize=(3.5,2.6))\r\n plt.title('OP vs FWHM for varying A$_2$/A$_1$ (LD)')\r\n for i in range(0,len(T2l),len(c)):\r\n plt.plot(fw,T2l[i:i+len(c)],'--o', label='A$_2$/A$_1$='+str(h[index]))\r\n #plt.plot(fw,P2l[i:i+len(c)],'--x', label='A$_2$/A$_1$='+str(h[index]))\r\n index+=1\r\n \r\n plt.legend()\r\n plt.xlabel('FWHM (\\xb0)')\r\n plt.ylabel('Orientation Parameter')\r\n plt.minorticks_on()\r\n plt.tight_layout()\r\n plt.show()\r\n\r\n \r\n index=0\r\n plt.figure(figsize=(3.5,2.6))\r\n plt.title('OP vs FWHM for varying A$_2$/A$_1$ (GND)')\r\n for i in range(0,len(T2gn),len(c)):\r\n plt.plot(fw,T2gn[i:i+len(c)],'--o', label='A$_2$/A$_1$='+str(h[index]))\r\n #plt.plot(fw,P2gn[i:i+21],'--x', label='A$_2$/A$_1$='+str(h[index]))\r\n index+=1\r\n plt.legend()\r\n plt.xlabel('FWHM (\\xb0)')\r\n plt.ylabel('Orientation Parameter')\r\n plt.minorticks_on()\r\n plt.tight_layout()\r\n plt.show()\r\n\r\n index=0\r\n plt.figure(figsize=(3.5,2.6))\r\n plt.title('OP vs FWHM for varying A$_2$/A$_1$ (GD)')\r\n for i in range(0,len(T2g),len(c)):\r\n plt.plot(fw,T2g[i:i+len(c)],'--o', label='A$_2$/A$_1$='+str(h[index]))\r\n #plt.plot(fw,P2g[i:i+21],'--x', label='A$_2$/A$_1$='+str(h[index]))\r\n index+=1\r\n plt.legend()\r\n plt.xlabel('FWHM (\\xb0)')\r\n plt.ylabel('Orientation Parameter')\r\n plt.minorticks_on()\r\n plt.tight_layout()\r\n plt.show()\r\n\r\n index=0\r\n plt.figure(figsize=(3.5,2.6))\r\n plt.title('T4 vs FWHM for varying A$_2$/A$_1$ (LD)')\r\n for i in range(0,len(T4l),len(c)):\r\n plt.plot(fw,T4l[i:i+len(c)],'--o', label='A$_2$/A$_1$='+str(h[index]))\r\n index+=1\r\n plt.plot(fw,T2l[0:len(c)],'-.s', label='<T$_2$>, A$_2$/A$_1$='+str(h[0]))\r\n plt.legend()\r\n plt.xlabel('FWHM (\\xb0)')\r\n plt.ylabel('Orientation Parameter')\r\n plt.minorticks_on()\r\n plt.tight_layout()\r\n plt.show()\r\n\r\n \r\n '''\r\n # Plotting orientation parameter as function of relative areas of secondary peaks\r\n # activate when value of FWHM is fixed to a single value\r\n \r\n plt.figure(figsize=(3.5,2.6))\r\n for i in range(0,len(T2l),len(c)):\r\n line1, = plt.plot(h,T2l,'b-o') \r\n line2, = plt.plot(h,T2gn,'g--o')\r\n line3, = plt.plot(h,T2g,'r-.o')\r\n line4, = plt.plot(h,T4l,'k:o')\r\n \r\n \r\n plt.legend((line1,line2,line3,line4),('<T$_2$>$_{LD}$','<T$_2$>$_{GND}$','<T$_2$>$_{GD}$','<T$_4$>$_{LD}$'))\r\n plt.xlabel('Relative area, A$_2$/A$_1$')\r\n plt.ylabel('Orientation Parameter')\r\n plt.minorticks_on()\r\n plt.tight_layout()\r\n plt.show()\r\n '''\r\n\r\n" ]
[ [ "matplotlib.pyplot.tight_layout", "numpy.asarray", "scipy.special.gamma", "numpy.log", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "numpy.vstack", "matplotlib.pyplot.xticks", "matplotlib.pyplot.figure", "numpy.abs", "numpy.cos", "numpy.empty_like", "matplotlib.pyplot.title", "numpy.linspace", "numpy.sqrt", "matplotlib.pyplot.ticklabel_format", "matplotlib.pyplot.close", "matplotlib.pyplot.legend", "matplotlib.pyplot.minorticks_on", "matplotlib.pyplot.show", "numpy.shape", "matplotlib.pyplot.locator_params", "numpy.round", "numpy.sin", "matplotlib.pyplot.xlabel" ] ]
bjlittle/poc-ngvat
[ "03cab7c4b184d1fa47d3a1dfee77f48ec609723a" ]
[ "poc-3/data/test/synthetic/utils.py" ]
[ "import matplotlib; matplotlib.use(\"Agg\")\r\nimport torch\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom PIL import Image\r\nimport glob\r\nimport os\r\nimport shutil\r\nimport time\r\nimport sys\r\nimport collections\r\npjoin = os.path.join\r\n\r\nclass LogPrint():\r\n def __init__(self, file, ExpID, print_to_screen):\r\n self.file = file\r\n self.ExpID = ExpID\r\n self.print_to_screen = print_to_screen\r\n def __call__(self, some_str):\r\n sstr = \"[%s %s %s \" % (self.ExpID[-6:], os.getpid(), time.strftime(\"%Y/%m/%d-%H:%M:%S]\")) + str(some_str)\r\n print(sstr, file=self.file, flush=True)\r\n if self.print_to_screen:\r\n print(sstr)\r\n\r\ndef check_path(x):\r\n if x:\r\n complete_path = glob.glob(x)\r\n assert(len(complete_path) == 1), \"The provided path points to more than 1 entity. Please check.\"\r\n x = complete_path[0]\r\n return x\r\n\r\ndef my_makedirs(d):\r\n if not os.path.exists(d):\r\n os.makedirs(d)\r\n\r\ndef set_up_dir(project_name, resume, debug):\r\n TimeID = time.strftime(\"%Y%m%d-%H%M%S\")\r\n if \"SERVER\" in os.environ.keys():\r\n ExpID = \"SERVER\" + os.environ[\"SERVER\"] + \"-\" + TimeID\r\n else:\r\n ExpID = TimeID\r\n \r\n project_path = \"Debug_Dir\" if debug else pjoin(\"Experiments\", ExpID + \"_\" + project_name)\r\n rec_img_path = pjoin(project_path, \"reconstructed_images\")\r\n weights_path = pjoin(project_path, \"weights\")\r\n my_makedirs(rec_img_path)\r\n my_makedirs(weights_path)\r\n log_path = pjoin(weights_path, \"log_\" + ExpID + \".txt\")\r\n log = open(log_path, \"w+\")\r\n print(\" \".join([\"CUDA_VISIBLE_DEVICES=0 python\", *sys.argv]),\r\n file=log, flush=True) # save the script\r\n return TimeID, ExpID, rec_img_path, weights_path, log\r\n\r\ndef get_CodeID():\r\n script = \"git log --pretty=oneline >> wh_CodeID_file.tmp\"\r\n os.system(script)\r\n x = open(\"wh_CodeID_file.tmp\").readline()\r\n os.remove(\"wh_CodeID_file.tmp\")\r\n return x[:8]\r\n\r\ndef is_img(x):\r\n return any(x.endswith(extension) for extension in [\".png\", \".jpg\", \".jpeg\"])\r\n\r\ndef load_param_from_t7(model, in_layer_index, out_layer):\r\n out_layer.weight = torch.nn.Parameter(\r\n model.get(in_layer_index).weight.float())\r\n out_layer.bias = torch.nn.Parameter(model.get(in_layer_index).bias.float())\r\n\r\nclass LogHub(object):\r\n def __init__(self, momentum=0):\r\n self.losses = {}\r\n self.momentum = momentum\r\n\r\n def update(self, name, value):\r\n if name not in self.losses:\r\n self.losses[name] = value\r\n else:\r\n self.losses[name] = self.losses[name] * \\\r\n self.momentum + value * (1 - self.momentum)\r\n\r\n def format(self):\r\n keys = self.losses.keys()\r\n keys = sorted(keys)\r\n logtmp = \"\"\r\n for k in keys:\r\n logtmp += \"%s: %.3f | \" % (k, self.losses[k])\r\n return logtmp[:-3]\r\n\r\n\r\ndef smart_load(model_path):\r\n sth = torch.load(model_path, map_location=lambda storage, location: storage)\r\n if isinstance(sth, collections.OrderedDict): # state_dict\r\n return sth\r\n elif isinstance(sth, dict): # dict which has a value of state_dict\r\n for k, v in sth.items():\r\n if isinstance(v, collections.OrderedDict):\r\n return v\r\n print(\"smart load failed, please manually check the given model\")\r\n" ]
[ [ "matplotlib.use", "torch.load" ] ]
irom-lab/AMR-Policies
[ "43552ca0ddcd584a9faa12b5588874bac41bd205" ]
[ "gibson2/agents/tf_agents/agents/reinforce/reinforce_agent.py" ]
[ "# coding=utf-8\n# # Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A REINFORCE Agent.\nImplements the REINFORCE algorithm from (Williams, 1992):\nhttp://www-anw.cs.umass.edu/~barto/courses/cs687/williams92simple.pdf\n\nAMR-Changes: Modified to include AMR regularizer\nOriginal Authors:\nS. Guadarrama, A. Korattikara, O. Ramirez, P. Castro, E. Holly, S. Fishman, K. Wang, E. Gonina, N. Wu, E. Kokiopoulou, L. Sbaiz, J. Smith, G. Bart ́ok, J. Berent, C. Harris, V. Vanhoucke, and E. Brevdo. TF-Agents: A library for reinforcement learning in tensorflow, 2018. URL https://github.com/tensorflow/agents\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gin\nimport numpy as np\nimport tensorflow as tf\n\nfrom tf_agents.agents import tf_agent\nfrom tf_agents.policies import actor_policy\nfrom tf_agents.policies import greedy_policy\nfrom tf_agents.trajectories import time_step as ts\nfrom tf_agents.utils import common\nfrom tf_agents.utils import eager_utils\nfrom tf_agents.utils import value_ops\n\n\ndef _standard_normalize(values, axes=(0,)):\n \"\"\"Standard normalizes values `values`.\n Args:\n values: Tensor with values to be standardized.\n axes: Axes used to compute mean and variances.\n Returns:\n Standardized values (values - mean(values[axes])) / std(values[axes]).\n \"\"\"\n values_mean, values_var = tf.nn.moments(x=values, axes=axes, keepdims=True)\n epsilon = np.finfo(values.dtype.as_numpy_dtype).eps\n normalized_values = ((values - values_mean) / (tf.sqrt(values_var) + epsilon))\n return normalized_values\n\n\ndef _entropy_loss(distributions, spec, weights=None):\n \"\"\"Computes entropy loss.\n Args:\n distributions: A possibly batched tuple of distributions.\n spec: A nested tuple representing the action spec.\n weights: Optional scalar or element-wise (per-batch-entry) importance\n weights. Includes a mask for invalid timesteps.\n Returns:\n A Tensor representing the entropy loss.\n \"\"\"\n with tf.name_scope('entropy_regularization'):\n entropy = -tf.cast(common.entropy(distributions, spec), tf.float32)\n if weights is not None:\n entropy *= weights\n return tf.reduce_mean(input_tensor=entropy)\n\n\ndef _get_initial_policy_state(policy, time_steps):\n \"\"\"Gets the initial state of a policy.\"\"\"\n batch_size = (\n tf.compat.dimension_at_index(time_steps.discount.shape, 0) or\n tf.shape(time_steps.discount)[0])\n\n return policy.get_initial_state(batch_size=batch_size)\n\n\[email protected]\nclass ReinforceAgent(tf_agent.TFAgent):\n \"\"\"A REINFORCE Agent.\n Implements:\n REINFORCE algorithm from\n \"Simple statistical gradient-following algorithms for connectionist\n reinforcement learning\"\n Williams, R.J., 1992.\n http://www-anw.cs.umass.edu/~barto/courses/cs687/williams92simple.pdf\n REINFORCE with state-value baseline, where state-values are estimated with\n function approximation, from\n \"Reinforcement learning: An introduction\" (Sec. 
13.4)\n Sutton, R.S. and Barto, A.G., 2018.\n http://incompleteideas.net/book/the-book-2nd.html\n The REINFORCE agent can be optionally provided with:\n - value_network: A `tf_agents.network.Network` which parameterizes state-value\n estimation as a neural network. The network will be called with\n call(observation, step_type) and returns a floating point state-values\n tensor.\n - value_estimation_loss_coef: Weight on the value prediction loss.\n If value_network and value_estimation_loss_coef are provided, advantages are\n computed as\n `advantages = (discounted accumulated rewards) - (estimated state-values)`\n and the overall learning objective becomes:\n `(total loss) =\n (policy gradient loss) +\n value_estimation_loss_coef * (squared error of estimated state-values)`\n \"\"\"\n\n def __init__(self,\n time_step_spec,\n action_spec,\n actor_network,\n optimizer,\n value_network=None,\n value_estimation_loss_coef=0.2,\n advantage_fn=None,\n use_advantage_loss=True,\n gamma=1.0,\n normalize_returns=True,\n gradient_clipping=None,\n debug_summaries=False,\n summarize_grads_and_vars=False,\n entropy_regularization=None,\n train_step_counter=None,\n name=None):\n \"\"\"Creates a REINFORCE Agent.\n Args:\n time_step_spec: A `TimeStep` spec of the expected time_steps.\n action_spec: A nest of BoundedTensorSpec representing the actions.\n actor_network: A tf_agents.network.Network to be used by the agent. The\n network will be called with call(observation, step_type).\n optimizer: Optimizer for the actor network.\n value_network: (Optional) A `tf_agents.network.Network` to be used by the\n agent. The network will be called with call(observation, step_type) and\n returns a floating point value tensor.\n value_estimation_loss_coef: (Optional) Multiplier for value prediction\n loss to balance with policy gradient loss.\n advantage_fn: A function `A(returns, value_preds)` that takes returns and\n value function predictions as input and returns advantages. The default\n is `A(returns, value_preds) = returns - value_preds` if a value network\n is specified and `use_advantage_loss=True`, otherwise `A(returns,\n value_preds) = returns`.\n use_advantage_loss: Whether to use value function predictions for\n computing returns. `use_advantage_loss=False` is equivalent to setting\n `advantage_fn=lambda returns, value_preds: returns`.\n gamma: A discount factor for future rewards.\n normalize_returns: Whether to normalize returns across episodes when\n computing the loss.\n gradient_clipping: Norm length to clip gradients.\n debug_summaries: A bool to gather debug summaries.\n summarize_grads_and_vars: If True, gradient and network variable summaries\n will be written during training.\n entropy_regularization: Coefficient for entropy regularization loss term.\n train_step_counter: An optional counter to increment every time the train\n op is run. Defaults to the global_step.\n name: The name of this agent. All variables in this module will fall under\n that name. 
Defaults to the class name.\n \"\"\"\n tf.Module.__init__(self, name=name)\n\n actor_network.create_variables()\n\n self._actor_network = actor_network\n\n if self._actor_network._rnn_encoder:\n self.rnn_size = self._actor_network._rnn_encoder._dynamic_unroll.trainable_weights[0].shape[1]\n else:\n self.rnn_size = 0\n\n if value_network:\n value_network.create_variables()\n self._value_network = value_network\n\n collect_policy = actor_policy.ActorPolicy(\n time_step_spec=time_step_spec,\n action_spec=action_spec,\n actor_network=self._actor_network,\n clip=True)\n\n policy = greedy_policy.GreedyPolicy(collect_policy)\n\n self._optimizer = optimizer\n self._gamma = gamma\n self._normalize_returns = normalize_returns\n self._gradient_clipping = gradient_clipping\n self._entropy_regularization = entropy_regularization\n self._value_estimation_loss_coef = value_estimation_loss_coef\n self._baseline = self._value_network is not None\n self._advantage_fn = advantage_fn\n if self._advantage_fn is None:\n if use_advantage_loss and self._baseline:\n self._advantage_fn = lambda returns, value_preds: returns - value_preds\n else:\n self._advantage_fn = lambda returns, _: returns\n\n super(ReinforceAgent, self).__init__(\n time_step_spec,\n action_spec,\n policy,\n collect_policy,\n train_sequence_length=None,\n debug_summaries=debug_summaries,\n summarize_grads_and_vars=summarize_grads_and_vars,\n train_step_counter=train_step_counter)\n\n def _initialize(self):\n pass\n\n def _train(self, experience, weights=None, lam=0):\n # Add a mask to ensure we reset the return calculation at episode\n # boundaries. This is needed in cases where episodes are truncated before\n # reaching a terminal state. Note experience is a batch of trajectories\n # where reward=next_step.reward so the mask may look shifted at first.\n\n non_last_mask = tf.cast(\n tf.math.not_equal(experience.next_step_type, ts.StepType.LAST),\n tf.float32)\n discounts = non_last_mask * experience.discount * self._gamma\n returns = value_ops.discounted_return(\n experience.reward, discounts, time_major=False)\n\n if self._debug_summaries:\n tf.compat.v2.summary.histogram(\n name='rewards', data=experience.reward, step=self.train_step_counter)\n tf.compat.v2.summary.histogram(\n name='discounts',\n data=experience.discount,\n step=self.train_step_counter)\n tf.compat.v2.summary.histogram(\n name='returns', data=returns, step=self.train_step_counter)\n\n with tf.GradientTape() as tape:\n loss_info = self.total_loss(\n experience, tf.stop_gradient(returns), weights=weights, lam=lam)\n tf.debugging.check_numerics(loss_info.loss, 'Loss is inf or nan')\n variables_to_train = self._actor_network.trainable_weights\n if self._baseline:\n variables_to_train += self._value_network.trainable_weights\n grads = tape.gradient(loss_info.loss, variables_to_train)\n\n grads_and_vars = list(zip(grads, variables_to_train))\n if self._gradient_clipping:\n grads_and_vars = eager_utils.clip_gradient_norms(grads_and_vars,\n self._gradient_clipping)\n\n if self._summarize_grads_and_vars:\n eager_utils.add_variables_summaries(grads_and_vars,\n self.train_step_counter)\n eager_utils.add_gradients_summaries(grads_and_vars,\n self.train_step_counter)\n\n self._optimizer.apply_gradients(\n grads_and_vars, global_step=self.train_step_counter)\n\n return tf.nest.map_structure(tf.identity, loss_info)\n\n def total_loss(self, experience, returns, weights, lam):\n # Ensure we see at least one full episode.\n time_steps = ts.TimeStep(experience.step_type,\n 
tf.zeros_like(experience.reward),\n tf.zeros_like(experience.discount),\n experience.observation)\n is_last = experience.is_last()\n num_episodes = tf.reduce_sum(tf.cast(is_last, tf.float32))\n tf.debugging.assert_greater(\n num_episodes,\n 0.0,\n message='No complete episode found. REINFORCE requires full episodes '\n 'to compute losses.')\n\n # Mask out partial episodes at the end of each batch of time_steps.\n # NOTE: We use is_last rather than is_boundary because the last transition\n # is the transition with the last valid reward. In other words, the\n # reward on the boundary transitions do not have valid rewards. Since\n # REINFORCE is calculating a loss w.r.t. the returns (and not bootstrapping)\n # keeping the boundary transitions is irrelevant.\n valid_mask = tf.cast(experience.is_last(), dtype=tf.float32)\n valid_mask = tf.math.cumsum(valid_mask, axis=1, reverse=True)\n valid_mask = tf.cast(valid_mask > 0, dtype=tf.float32)\n if weights is not None:\n weights *= valid_mask\n else:\n weights = valid_mask\n\n advantages = returns\n value_preds = None\n\n if self._baseline:\n value_preds, _ = self._value_network(time_steps.observation,\n time_steps.step_type)\n if self._debug_summaries:\n tf.compat.v2.summary.histogram(\n name='value_preds', data=value_preds, step=self.train_step_counter)\n\n advantages = self._advantage_fn(returns, value_preds)\n if self._debug_summaries:\n tf.compat.v2.summary.histogram(\n name='advantages', data=advantages, step=self.train_step_counter)\n\n # TODO(b/126592060): replace with tensor normalizer.\n if self._normalize_returns:\n advantages = _standard_normalize(advantages, axes=(0, 1))\n if self._debug_summaries:\n tf.compat.v2.summary.histogram(\n name='normalized_%s' %\n ('advantages' if self._baseline else 'returns'),\n data=advantages,\n step=self.train_step_counter)\n\n tf.nest.assert_same_structure(time_steps, self.time_step_spec)\n policy_state = _get_initial_policy_state(self.collect_policy, time_steps)\n actions_distribution = self.collect_policy.distribution(\n time_steps, policy_state=policy_state).action\n\n\n policy_gradient_loss = self.policy_gradient_loss(\n actions_distribution,\n experience.action,\n experience.is_boundary(),\n advantages,\n num_episodes,\n weights,\n )\n entropy_regularization_loss = self.entropy_regularization_loss(\n actions_distribution, weights)\n\n group_lasso_loss = self.group_lasso_loss()\n\n total_loss = policy_gradient_loss + entropy_regularization_loss + lam * group_lasso_loss\n\n value_estimation_loss = None\n if self._baseline:\n value_estimation_loss = self.value_estimation_loss(\n value_preds, returns, num_episodes, weights)\n total_loss += value_estimation_loss\n\n with tf.name_scope('Losses/'):\n tf.compat.v2.summary.scalar(\n name='policy_gradient_loss',\n data=policy_gradient_loss,\n step=self.train_step_counter)\n tf.compat.v2.summary.scalar(\n name='entropy_regularization_loss',\n data=entropy_regularization_loss,\n step=self.train_step_counter)\n tf.compat.v2.summary.scalar(\n name='group_lasso_loss',\n data=lam * group_lasso_loss,\n step=self.train_step_counter)\n if self._baseline:\n tf.compat.v2.summary.scalar(\n name='value_estimation_loss',\n data=value_estimation_loss,\n step=self.train_step_counter)\n tf.compat.v2.summary.scalar(\n name='total_loss', data=total_loss, step=self.train_step_counter)\n\n return tf_agent.LossInfo(total_loss, ())\n\n def policy_gradient_loss(self,\n actions_distribution,\n actions,\n is_boundary,\n returns,\n num_episodes,\n weights=None):\n \"\"\"Computes the 
policy gradient loss.\n Args:\n actions_distribution: A possibly batched tuple of action distributions.\n actions: Tensor with a batch of actions.\n is_boundary: Tensor of booleans that indicate if the corresponding action\n was in a boundary trajectory and should be ignored.\n returns: Tensor with a return from each timestep, aligned on index. Works\n better when returns are normalized.\n num_episodes: Number of episodes contained in the training data.\n weights: Optional scalar or element-wise (per-batch-entry) importance\n weights. May include a mask for invalid timesteps.\n Returns:\n policy_gradient_loss: A tensor that will contain policy gradient loss for\n the on-policy experience.\n \"\"\"\n # TODO(b/126594799): Add class IndependentNested(tfd.Distribution) to handle\n # nests of independent distributions like this.\n action_log_prob = common.log_probability(actions_distribution, actions,\n self.action_spec)\n\n # Filter out transitions between end state of previous episode and start\n # state of next episode.\n valid_mask = tf.cast(~is_boundary, tf.float32)\n action_log_prob *= valid_mask\n\n action_log_prob_times_return = action_log_prob * returns\n\n if weights is not None:\n action_log_prob_times_return *= weights\n\n if self._debug_summaries:\n tf.compat.v2.summary.histogram(\n name='action_log_prob',\n data=action_log_prob,\n step=self.train_step_counter)\n tf.compat.v2.summary.histogram(\n name='action_log_prob_times_return',\n data=action_log_prob_times_return,\n step=self.train_step_counter)\n\n # Policy gradient loss is defined as the sum, over timesteps, of action\n # log-probability times the cumulative return from that timestep onward.\n # For more information, see (Williams, 1992).\n policy_gradient_loss = -tf.reduce_sum(\n input_tensor=action_log_prob_times_return)\n\n # We take the mean over episodes by dividing by num_episodes.\n policy_gradient_loss = policy_gradient_loss / num_episodes\n\n return policy_gradient_loss\n\n def entropy_regularization_loss(self, actions_distribution, weights=None):\n \"\"\"Computes the optional entropy regularization loss.\n Extending REINFORCE by entropy regularization was originally proposed in\n \"Function optimization using connectionist reinforcement learning\n algorithms.\" (Williams and Peng, 1991).\n Args:\n actions_distribution: A possibly batched tuple of action distributions.\n weights: Optional scalar or element-wise (per-batch-entry) importance\n weights. May include a mask for invalid timesteps.\n Returns:\n entropy_regularization_loss: A tensor with the entropy regularization\n loss.\n \"\"\"\n if self._entropy_regularization:\n loss = _entropy_loss(actions_distribution, self.action_spec, weights)\n loss *= self._entropy_regularization\n else:\n loss = tf.constant(0.0, dtype=tf.float32)\n\n return loss\n\n def group_lasso_loss(self):\n mem_weights = self._actor_network._rnn_encoder._dynamic_unroll.trainable_weights[0]\n print('SANITY CHECK: 256 + rnn size')\n print(mem_weights.shape)\n group_lasso_loss = tf.reduce_sum(tf.sqrt(tf.reduce_sum(mem_weights ** 2, axis=0)))\n\n return group_lasso_loss\n\n def value_estimation_loss(self,\n value_preds,\n returns,\n num_episodes,\n weights=None):\n \"\"\"Computes the value estimation loss.\n Args:\n value_preds: Per-timestep estimated values.\n returns: Per-timestep returns for value function to predict.\n num_episodes: Number of episodes contained in the training data.\n weights: Optional scalar or element-wise (per-batch-entry) importance\n weights. 
May include a mask for invalid timesteps.\n Returns:\n value_estimation_loss: A scalar value_estimation_loss loss.\n \"\"\"\n value_estimation_error = tf.math.squared_difference(returns, value_preds)\n if weights is not None:\n value_estimation_error *= weights\n\n value_estimation_loss = (\n tf.reduce_sum(input_tensor=value_estimation_error) *\n self._value_estimation_loss_coef)\n\n # We take the mean over episodes by dividing by num_episodes.\n value_estimation_loss = value_estimation_loss / num_episodes\n\n return value_estimation_loss\n" ]
[ [ "tensorflow.compat.dimension_at_index", "tensorflow.name_scope", "tensorflow.reduce_sum", "tensorflow.debugging.check_numerics", "tensorflow.debugging.assert_greater", "tensorflow.nest.assert_same_structure", "tensorflow.GradientTape", "tensorflow.math.cumsum", "tensorflow.compat.v2.summary.histogram", "tensorflow.constant", "tensorflow.shape", "tensorflow.Module.__init__", "tensorflow.compat.v2.summary.scalar", "tensorflow.nest.map_structure", "tensorflow.zeros_like", "tensorflow.cast", "numpy.finfo", "tensorflow.nn.moments", "tensorflow.math.not_equal", "tensorflow.sqrt", "tensorflow.reduce_mean", "tensorflow.math.squared_difference", "tensorflow.stop_gradient" ] ]
CQCL/qvtsim
[ "875a480e4daf9331cc5ab43c49018a6f6e327183" ]
[ "numerical_class.py" ]
[ "#!/usr/bin/env python\n\n#####################################################################################\n#\n# Copyright 2022 Quantinuum\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0.\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# \n#####################################################################################\n\n\"\"\"Numerical estimation class.\"\"\"\n\nimport numpy as np\nimport pickle\nfrom datetime import datetime\n\nfrom qiskit import Aer, execute, QuantumCircuit\nimport qiskit.ignis.verification.quantum_volume as qv\n\nfrom estimation_class import QVEstimate\nfrom transpiler_passes import preset_passes\nfrom error_models import arbitrary_noise\nfrom utils import gate_counts, convert, qv_circuits, binstr\n\n\nclass NumericalEstimate(QVEstimate):\n \"\"\"Numerical estimation method for QVT heavy output probabilities.\"\"\"\n\n def __init__(self,\n qubit_list: list,\n optimization_lvl: list,\n error_name: str,\n error_list: list,\n ntrials: int):\n\n super().__init__(qubit_list, optimization_lvl, error_name, error_list)\n \n self.ntrials = ntrials\n self.gate_counts = {}\n \n def run(self):\n \"\"\"Runs numerical simulation over error_dict and qubit list. \"\"\"\n\n self.act_success = {}\n for n in self.qubit_list:\n start = datetime.today()\n noise_model = {\n e: arbitrary_noise(edict, n, True) \n for e, edict in self.error_dict.items()\n }\n qv_circs, heavy_outputs, self.ideal_success[n] = generate_ideal(\n n, \n self.ntrials\n )\n if self.optimization_lvl != 'high':\n # Everything but 'high' is independent of the error rate so done \n # outside next loop\n pm = preset_passes(self.optimization_lvl)\n qv_circs_new = [pm.run(qc) for qc in qv_circs]\n self.gate_counts[n] = gate_counts(qv_circs_new)\n \n for e in self.error_dict:\n if self.optimization_lvl == 'high':\n # 'high' optimization is set based on error rate so done \n # inside loop\n transpiler_options = {\n 'tol': estimate_errors(self.error_dict[e]), \n 'mirror': True\n }\n pm = preset_passes('high', transpiler_options)\n qv_circs_new = [pm.run(qc) for qc in qv_circs]\n self.gate_counts[n] = gate_counts(qv_circs_new)\n\n self.act_success[n, e] = act_outcomes(\n qv_circs_new,\n noise_model[e],\n heavy_outputs,\n self.optimization_lvl,\n ) \n \n time = datetime.today() - start\n print(f'finished n={n}, time={time}')\n\n\ndef generate_ideal(nqubits: int,\n reps: int,\n savefile: bool = True):\n \"\"\"\n Generate ideal circuits, heavy_outcomes, and success for all qubit numbers.\n \n Args:\n nqubits: number of qubits\n reps: number of random circuits\n savefile: if True then save ideal info to file\n \n \"\"\"\n backend_ideal = Aer.get_backend('statevector_simulator')\n \n # circuit generation\n qv_circs, qv_circs_nomeas = qv_circuits(nqubits, reps)\n \n # circuit simulation\n ideal_results = execute(\n qv_circs_nomeas,\n backend=backend_ideal\n ).result()\n \n # identify heavy outcomes\n plist = [\n np.array([\n np.abs(s)**2 \n for s in ideal_results.get_statevector(i)\n ])\n for i in range(reps)\n ]\n heavy_outcomes = [np.argsort(p)[len(p)//2:] for p in 
plist]\n ideal_results = [np.sum(p[h]) for p, h in zip(plist, heavy_outcomes)]\n \n if savefile:\n with open(f'qv_ideal_n{nqubits}.pkl', 'wb') as f:\n pickle.dump([qc.qasm() for qc in qv_circs_nomeas], f)\n pickle.dump(ideal_results, f)\n pickle.dump(heavy_outcomes, f)\n \n return qv_circs, heavy_outcomes, ideal_results\n \n \ndef act_outcomes(qv_circs: list,\n noise_model,\n heavy_outputs: list,\n optimization_level: str):\n \"\"\"\n Returns actual state under noise_model.\n \n Notes:\n - only works when optimization is done before execute\n \n Args:\n qv_circs: list of qv circuits as qasm strings\n noise_model: qiskit NoiseModel object\n heavy_outcomes: list of heavy outcomes for each circuits\n optimization_level: level of optimization of circuits\n backend_options: options used in execute for backend\n \n Returns:\n (list) list of probability of each outcome for each circuit\n \n \"\"\" \n heavy_probs = []\n for i, qc in enumerate(qv_circs):\n if optimization_level == 'high': \n meas_order = new_result_order(qc.num_qubits, qc)\n \n qc.remove_final_measurements()\n [qc.id(q) for q in range(qc.num_qubits)]\n qc.save_probabilities(label='end')\n\n backend = Aer.get_backend('qasm_simulator')\n ideal_results = execute(\n qc, \n noise_model=noise_model,\n backend=backend, \n optimization_level=0\n ).result()\n tmp_probs = ideal_results.results[0].data.end\n \n if optimization_level == 'high':\n heavy_probs.append(\n sum(\n tmp_probs[h]\n for h in np.argsort(meas_order)[heavy_outputs[i]]\n )\n )\n \n else:\n heavy_probs.append(\n sum(\n tmp_probs[h]\n for h in heavy_outputs[i]\n )\n )\n\n return heavy_probs\n\n\ndef read_meas_order(nqubits, \n qc: QuantumCircuit):\n \"\"\"Qubit order from measurement order of qasm str.\"\"\"\n \n qubits = [0] * nqubits\n for n in range(1, nqubits + 1):\n qubits[qc[-n][2][0].index] = nqubits - 1 - qc[-n][1][0].index\n\n return qubits[::-1]\n \n \ndef new_result_order(nqubits, \n qc: QuantumCircuit):\n \"\"\"Map for measurement index to new index.\"\"\"\n \n morder = read_meas_order(nqubits, qc)\n\n str_list = [binstr(i, nqubits) for i in range(2**nqubits)]\n meas_map = [\n int(''.join(np.array([b for b in bstr])[morder]), 2) \n for bstr in str_list\n ]\n \n return meas_map\n\n\ndef estimate_errors(error_dict: dict):\n \"\"\"Estimate TQ errors based on error_dict.\"\"\"\n\n tq_dep = 1\n sq_dep = 1\n if 'tq_dep' in error_dict:\n tq_dep *= convert(1 - error_dict['tq_dep'], 4, 'avg', 'dep')\n \n if 'tq_coh' in error_dict:\n tq_dep *= convert(1 - error_dict['tq_coh'], 4, 'avg', 'dep')\n \n if 'sq_dep' in error_dict:\n sq_dep *= convert(1 - error_dict['sq_dep'], 2, 'avg', 'dep')\n \n if 'sq_coh' in error_dict:\n sq_dep *= convert(1 - error_dict['sq_coh'], 2, 'avg', 'dep')\n\n if 'sq_dph' in error_dict:\n sq_dep *= convert(1 - error_dict['sq_dph'], 2, 'avg', 'dep')\n \n if 'tq_dph' in error_dict:\n tq_dep *= convert(1 - error_dict['tq_dph'], 2, 'avg', 'dep')\n \n sq_dep = convert(convert(sq_dep, 2, 'dep', 'proc') ** 2, 4, 'proc', 'dep')\n \n slice_fid = convert(sq_dep * tq_dep, 4, 'dep', 'avg')\n \n return slice_fid" ]
[ [ "numpy.array", "numpy.sum", "numpy.argsort", "numpy.abs" ] ]
skasiraj/Parameter-Estimation-BO
[ "90e701db7faec8e500a74a6d58bbbd121958f326" ]
[ "examples/rosenbrock/rosenbrock_estimate_params_scipy.py" ]
[ "\"\"\"\nEstimate the Rosenbrock function parameters a and b\nfor the function f(x,y) = (a - x)^2 + b(y - x^2)^2\nusing generated data similar to a typical parameter\nestimation problem\n\"\"\"\n\nimport time\nimport numpy as np\nimport pandas as pd\nfrom scipy.optimize import minimize\nfrom scipy.optimize import Bounds\nfrom estimator.modelwrappers import ModelWrapper\nfrom estimator.utils import WeightedRMSE\nfrom rosenbrock_data_generate import generate_data\n\n# Change x,y,a, b to solve a\n# new generate data for a\n# parameter estimation problem\ngenerate_new_data = False\nif (generate_new_data):\n a = 10.0\n b = 200.0\n x = np.linspace(-2, 2, 10)\n y = np.linspace(-1, 3, 10)\n generate_data(a=a, b=b)\n\n\ndef rosenbrock(X, params):\n \"\"\"\n The Rosenbrock function.\n The function computed is::\n f(x,y) = (a - x)^2 + b(y - x^2)^2\n \"\"\"\n _x, _y = X\n return (params[0] - _x) ** 2 + params[1] * (_y - _x ** 2) ** 2\n\n\n# Try SciPy Optimizers for the same task\ndef loss_func_scipy(x, wrapper):\n \"\"\"\n Customized loss function specific to this problem\n \"\"\"\n loss = 0\n # customization specific to the problem\n _X, _Y = np.meshgrid(wrapper.x_inputs[0],\n wrapper.x_inputs[1])\n\n _Z = wrapper.model((_X, _Y), x)\n y_predict = _Z.reshape(1, -1)\n # end customization specific to the problem\n for i in range(wrapper.n_responses):\n # Factor in the weights\n loss += WeightedRMSE(wrapper.y_groundtruth[i], y_predict[i], wrapper.y_weights)\n wrapper.call_count += 1\n return loss\n\n\n# Read data (X,Y,Z) from the data.csv file which is used for fitting the\n# parameters a and b.\n# You edit this section for your specific problem\ndf = pd.read_csv('data.csv')\npivot_df = df.pivot(index='X', columns='Y',\n values='Z')\ny = pivot_df.columns.values\nx = pivot_df.index.values\ndata = df.to_numpy()\nx_input = [x, y] # experimental inputs read from the csv file.\ny_response = data[:, -1:].T\n\n# Set up the problem\n\n# Change the ranges of a and b if you generate new data if using a different a or b\n# these are the bounds within which # parameters are searched\nparameter_range = [[0.0, 5.0], # for default a\n [50.0, 150.0]] # for default b\npara_names = ['a', 'b']\n\nstart_time = time.time()\nestimator_name = 'rosenbrock-test-scipy'\n\nwrapper = ModelWrapper(model_function=rosenbrock, # model function used for evaluating responses = f(inputs,params)\n para_names=para_names,\n name=estimator_name,\n )\nwrapper.input_data(x_inputs=x_input,\n n_trials=100,\n y_groundtruth=y_response)\nbounds = Bounds([0.0, 5.0], [50.0, 150.0])\nres = minimize(loss_func_scipy,\n x0=[-50.0, 0.0],\n args=(wrapper,),\n method=\"Nelder-Mead\",\n options={'xtol': 1e-8, 'disp': True},\n bounds=bounds,\n )\nend_time = time.time()\na = open(\"debug-scipy.log\",'w')\na.write(\"Objective function called {} times \\n\".format(wrapper.call_count))\na.write(\"Parameters are {} \\n\".format(res['x']))\na.write(\"Total time taken in sec {} \\n \\n \\n\".format(end_time-start_time))\na.write(\"Optimizer results {}\".format(res))\nend_time = time.time()\nprint(res)\n" ]
[ [ "pandas.read_csv", "scipy.optimize.minimize", "scipy.optimize.Bounds", "numpy.meshgrid", "numpy.linspace" ] ]